| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
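Each row below appears to pair a `code` snippet with a `style_context` snippet: the `| N` line after each snippet carries its codestyle id, and the trailing `| 0` / `| 1` line is the row's `label`.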
def bfs(graph, s, t, parent) -> bool:
    # Return True if there is an augmenting path from s to t in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    # This array is filled by BFS to store the augmenting path.
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
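For the capacity matrix above (the classic six-node max-flow network from CLRS), the script prints 23.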
| 41
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # pad_token_id just has to be set for the test to run, the value itself does not matter
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
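Together, these tests check that the in-graph TFGPT2Tokenizer reproduces the reference GPT2Tokenizer outputs, and that it survives tf.function compilation, SavedModel export, a get_config/from_config round trip, and fixed-length padding.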
| 311
| 0
|
import numpy as np

from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch

from .test_feature_extraction_common import FeatureExtractionSavingTestMixin


class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
| 42
|
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model doc TOC and sort models alphabetically by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
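Without `--fix_and_overwrite` the script only validates, raising when the model TOC is unsorted or contains conflicting duplicates; with the flag it rewrites `docs/source/en/_toctree.yml` in place.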
| 311
| 0
|
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 43
|
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    # Disable gradient tracking for every parameter of the given module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 311
| 0
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : Any = {'vocab_file': 'spiece.model'}
_a : Dict = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
_a : List[str] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
_a : str = '▁'
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__="</s>" , a__="<unk>" , a__="<pad>" , a__=100 , a__=None , a__ = None , a__=True , **a__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase : List[Any] = [F"<extra_id_{i}>" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase : Union[str, Any] = len(set(filter(lambda a__ : bool("""extra_id""" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
_lowerCAmelCase : str = legacy
_lowerCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_lowerCAmelCase : Any = vocab_file
_lowerCAmelCase : Any = extra_ids
_lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def __A ( a__ , a__ , a__ ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_lowerCAmelCase : List[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , a__ , )
return max_model_length
@property
def __A ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def __A ( self ):
return list(
set(filter(lambda a__ : bool(re.search(r"""<extra_id_\d+>""" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self ):
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def __A ( self , a__ ):
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_lowerCAmelCase : Optional[Any] = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self ):
_lowerCAmelCase : Tuple = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self , a__ ):
_lowerCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Any = {}
_lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , a__ , **a__ ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_lowerCAmelCase : Union[str, Any] = SPIECE_UNDERLINE + text.replace(a__ , """ """ )
return super().tokenize(a__ , **a__ )
def __A ( self , a__ , **a__ ):
if not self.legacy:
_lowerCAmelCase : int = text.startswith(a__ )
if is_first:
_lowerCAmelCase : Tuple = text[1:]
_lowerCAmelCase : List[Any] = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(a__ ):
_lowerCAmelCase : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def __A ( self , a__ ):
if token.startswith("""<extra_id_""" ):
_lowerCAmelCase : Any = re.match(r"""<extra_id_(\d+)>""" , a__ )
_lowerCAmelCase : Dict = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def __A ( self , a__ ):
if index < self.sp_model.get_piece_size():
_lowerCAmelCase : Union[str, Any] = self.sp_model.IdToPiece(a__ )
else:
_lowerCAmelCase : Union[str, Any] = F"<extra_id_{self.vocab_size - 1 - index}>"
return token
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[str] = """"""
_lowerCAmelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Union[str, Any] = []
else:
current_sub_tokens.append(a__ )
_lowerCAmelCase : Tuple = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Optional[Any] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , """wb""" ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
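Note the sentinel mapping in `_convert_token_to_id` above: `<extra_id_0>` resolves to `vocab_size - 1`, `<extra_id_1>` to `vocab_size - 2`, and so on, so the extra ids occupy the top of the vocabulary in reverse order.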
| 44
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # wait for all nodes to write their rank_*.json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
| 311
| 0
|
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int ) -> list[int]:
if length <= 0 or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(lowerCAmelCase__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
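Since `n` starts at 0, these calls print `[0, 1, 6, 15, 28]` and `[0, 1, 6, 15, 28, 45, 66, 91, 120, 153]`.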
| 45
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
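The forward-pass sanity check reshapes 2 sequences x 4 codebooks into a decoder batch of 8, so the expected logits shape is (8, 1, 2048), matching the 2048-entry audio codebook vocabulary.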
| 311
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=10_000 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1_024 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
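# --- Illustrative usage (added; not part of the original file) ---
# A minimal sketch of the attribute_map aliasing defined above: the generic
# `hidden_size` / `num_attention_heads` names resolve to the decoder fields.
#
# config = Speech2Text2Config(d_model=256, decoder_attention_heads=4)
# assert config.hidden_size == 256
# assert config.num_attention_heads == 4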
| 46
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
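# --- Illustrative note (added; not part of the original test) ---
# pad_across_processes right-pads (or left-pads with pad_first=True) each
# process's tensor to the largest dim-0 size across processes so they can be
# gathered. A single-process sketch of the same padding, assuming max_len is
# the cross-process maximum:
#
# import torch.nn.functional as F
# t = torch.randint(0, 10, (3, 10))
# max_len = 5
# padded = F.pad(t, (0, 0, 0, max_len - t.shape[0]))  # pads dim 0 on the right with 0s
# assert padded.shape[0] == max_len and torch.equal(padded[:3], t)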
| 311
| 0
|
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Convert a polar force (magnitude, angle) to cartesian [x, y] components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check for static equilibrium: the net moment about the origin is ~0."""
    # moment = position cross force; for 2D rows numpy's cross returns the scalar z-component
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(7_1_8.4, 1_8_0 - 3_0),
polar_force(8_7_9.5_4, 4_5),
polar_force(1_0_0, -9_0),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(3_0 * 9.8_1, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    location = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
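# --- Worked note (added; not in the original file) ---
# in_static_equilibrium checks only the net moment about the origin; in LaTeX:
#     \left| \sum_i \vec{r}_i \times \vec{F}_i \right| < \varepsilon
# where row i of `location` is \vec{r}_i and row i of `forces` is \vec{F}_i.
# For 2D rows, numpy's cross() returns the scalar moment r_x F_y - r_y F_x.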
| 47
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
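# --- Illustrative usage (added; not part of the original tests) ---
# A minimal sketch of the pipeline exercised above; the checkpoint is the
# pipeline default (OWL-ViT) and the image URL matches the slow-test fixtures.
#
# from transformers import pipeline
#
# detector = pipeline("zero-shot-object-detection")
# preds = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote", "couch"],
#     threshold=0.2,
# )
# for p in preds:
#     print(p["label"], round(p["score"], 4), p["box"])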
| 311
| 0
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = """bart"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=5_0265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , num_labels=3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                "The config can simply be saved and uploaded again to be fixed." )
class BartOnnxConfig(OnnxSeqaSeqConfigWithPast):
'''simple docstring'''
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase : List[Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase : int = {0: "batch"}
lowerCamelCase : str = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCamelCase : List[Any] = {0: "batch", 1: "decoder_sequence"}
lowerCamelCase : Dict = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase : Union[str, Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase , lowerCamelCase : str = self.num_layers
for i in range(UpperCamelCase__ ):
lowerCamelCase : Any = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"}
else:
lowerCamelCase : Optional[int] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase : List[str] = super().outputs
else:
lowerCamelCase : Union[str, Any] = super(UpperCamelCase__ , self ).outputs
if self.use_past:
lowerCamelCase , lowerCamelCase : List[str] = self.num_layers
for i in range(UpperCamelCase__ ):
lowerCamelCase : Dict = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase : str = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
lowerCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
lowerCamelCase : Optional[int] = seq_length if not self.use_past else 1
lowerCamelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase : int = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase , lowerCamelCase : Union[str, Any] = common_inputs["input_ids"].shape
lowerCamelCase : Union[str, Any] = common_inputs["decoder_input_ids"].shape[1]
lowerCamelCase , lowerCamelCase : List[str] = self.num_attention_heads
lowerCamelCase : List[str] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase : str = decoder_seq_length + 3
lowerCamelCase : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase : List[str] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
lowerCamelCase : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase , lowerCamelCase : Tuple = self.num_layers
lowerCamelCase : str = min(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : str = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
lowerCamelCase : Tuple = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
lowerCamelCase : Any = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
lowerCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCamelCase : Optional[Any] = seqlen + 2
lowerCamelCase , lowerCamelCase : Tuple = self.num_layers
lowerCamelCase , lowerCamelCase : Tuple = self.num_attention_heads
lowerCamelCase : int = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase : Any = common_inputs["attention_mask"].dtype
lowerCamelCase : Any = torch.cat(
[common_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
lowerCamelCase : Optional[Any] = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase : Dict = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase : str = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase : Union[str, Any] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase : Any = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
lowerCamelCase : int = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
lowerCamelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase : Union[str, Any] = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
lowerCamelCase : int = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
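# --- Illustrative usage (added; not in the original file) ---
# A minimal sketch of driving the ONNX config above; "facebook/bart-base" and
# the "default" task are assumptions, and the call only exercises
# generate_dummy_inputs rather than a full export.
#
# from transformers import AutoConfig, AutoTokenizer
#
# config = AutoConfig.from_pretrained("facebook/bart-base")
# onnx_config = BartOnnxConfig(config, task="default")
# tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
# dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
# print(sorted(dummy.keys()))  # input_ids, attention_mask, decoder_input_ids, ...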
| 48
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    '''Return the two's complement bit string of a non-positive integer.'''
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
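# --- Worked example (added; not in the original file) ---
# For number = -5: bin(-5)[3:] == "101" (3 bits), abs(-5) - 2**3 == -3, and
# bin(-3)[3:] == "11", so the result is "1" + "0" + "11" -> "0b1011", the
# 4-bit two's complement of -5.
assert twos_complement(-5) == "0b1011"
assert twos_complement(0) == "0b0"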
| 311
| 0
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__snake_case :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = {}
__a = {}
__a = {}
# preprocess args
if "points_per_batch" in kwargs:
__a = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
__a = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
__a = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
__a = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
__a = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
__a = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
__a = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
__a = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
__a = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
__a = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
__a = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
__a = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
__a = self.image_processor.size['''longest_edge''']
__a , __a , __a , __a = self.image_processor.generate_crop_boxes(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
with self.device_placement():
if self.framework == "pt":
__a = self.get_inference_context()
with inference_context():
__a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
__a = image_embeddings
__a = grid_points.shape[1]
__a = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
'''To return all points at once, set points_per_batch to None''')
for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = grid_points[:, i : i + points_per_batch, :, :]
__a = input_labels[:, i : i + points_per_batch]
__a = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ):
'''simple docstring'''
__a = model_inputs.pop('''input_boxes''')
__a = model_inputs.pop('''is_last''')
__a = model_inputs.pop('''original_sizes''').tolist()
__a = model_inputs.pop('''reshaped_input_sizes''').tolist()
__a = self.model(**__SCREAMING_SNAKE_CASE)
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__a = model_outputs['''pred_masks''']
__a = self.image_processor.post_process_masks(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE)
__a = model_outputs['''iou_scores''']
__a , __a , __a = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ):
'''simple docstring'''
__a = []
__a = []
__a = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores'''))
all_masks.extend(model_output.pop('''masks'''))
all_boxes.append(model_output.pop('''boxes'''))
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a , __a , __a , __a = self.image_processor.post_process_for_mask_generation(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = defaultdict(__SCREAMING_SNAKE_CASE)
for output in model_outputs:
for k, v in output.items():
extra[k].append(__SCREAMING_SNAKE_CASE)
__a = {}
if output_rle_mask:
__a = rle_mask
if output_bboxes_mask:
__a = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
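# --- Illustrative usage (added; not part of the original pipeline code) ---
# A minimal sketch of the mask-generation pipeline above; the checkpoint
# "facebook/sam-vit-base" and the points_per_batch value are assumptions.
#
# from transformers import pipeline
#
# generator = pipeline("mask-generation", model="facebook/sam-vit-base")
# outputs = generator(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     points_per_batch=64,
# )
# print(len(outputs["masks"]), outputs["scores"][:3])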
| 49
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    '''Euclidean distance between two points given as sequences.'''
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    '''Classify `point` by majority vote among its k nearest training points.'''
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
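# --- Illustrative check (added; not in the original file) ---
# Sanity-checking the hand-rolled classifier against scikit-learn's
# KNeighborsClassifier on the same split is a reasonable smoke test; exact
# agreement on every point is not guaranteed (tie-breaking differs).
#
# from sklearn.neighbors import KNeighborsClassifier
#
# sk = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
# pred = sk.predict([[4.4, 3.1, 1.3, 1.4]])[0]
# print(classes[pred], classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))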
| 311
| 0
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()
    isogram = is_isogram(input_str)
    print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
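# --- Worked examples (added; not in the original file) ---
# An isogram contains no repeated letters, case-insensitively.
assert is_isogram("Uncopyrightable") is True
assert is_isogram("allowance") is False  # repeated 'a' and 'l'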
| 50
|
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    '''True for 1, 2, 4, 8, ...; note that 0 also satisfies the bit trick.'''
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
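# --- Worked note (added; not in the original file) ---
# A power of two has exactly one set bit, so subtracting 1 flips that bit and
# sets all lower bits: 8 (0b1000) & 7 (0b0111) == 0. Any other positive number
# shares at least one bit with its predecessor: 6 (0b110) & 5 (0b101) == 0b100.
assert is_power_of_two(8) and not is_power_of_two(6)
assert is_power_of_two(0)  # edge case: the bit trick treats 0 as a power of two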
| 311
| 0
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __snake_case :
def __init__( self : str , _snake_case : Tuple , _snake_case : List[str]=13 , _snake_case : Tuple=7 , _snake_case : Optional[Any]=True , _snake_case : List[str]=True , _snake_case : Dict=False , _snake_case : List[str]=True , _snake_case : Optional[int]=99 , _snake_case : Optional[int]=32 , _snake_case : Dict=5 , _snake_case : List[Any]=4 , _snake_case : Optional[int]=37 , _snake_case : Optional[Any]="gelu" , _snake_case : Tuple=0.1 , _snake_case : Tuple=0.1 , _snake_case : int=512 , _snake_case : int=16 , _snake_case : List[Any]=2 , _snake_case : Any=0.0_2 , _snake_case : str=3 , _snake_case : Union[str, Any]=4 , _snake_case : str=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : str):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : int , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = OpenLlamaModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self : Dict , _snake_case : Optional[int] , _snake_case : Dict , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : Any , _snake_case : List[Any] , _snake_case : str , _snake_case : Optional[int] , _snake_case : str , ):
"""simple docstring"""
UpperCAmelCase_ = True
UpperCAmelCase_ = OpenLlamaModel(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : int , _snake_case : List[Any] , _snake_case : Any , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , ):
"""simple docstring"""
UpperCAmelCase_ = OpenLlamaForCausalLM(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : str , _snake_case : Tuple , _snake_case : Tuple , _snake_case : Dict , ):
"""simple docstring"""
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = OpenLlamaForCausalLM(config=_snake_case)
model.to(_snake_case)
model.eval()
# first forward pass
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
UpperCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1)
UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1)
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['''hidden_states'''][0]
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['''hidden_states'''][0]
# select random slice
UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3))
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( a , a , a , unittest.TestCase ):
UpperCAmelCase__ : Dict = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCAmelCase__ : int = (OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Dict = False
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = OpenLlamaModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = input_dict['''input_ids''']
UpperCAmelCase_ = input_ids.ne(1).to(_snake_case)
UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
UpperCAmelCase_ = OpenLlamaForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = '''single_label_classification'''
UpperCAmelCase_ = input_dict['''input_ids''']
UpperCAmelCase_ = input_ids.ne(1).to(_snake_case)
UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
UpperCAmelCase_ = OpenLlamaForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = '''multi_label_classification'''
UpperCAmelCase_ = input_dict['''input_ids''']
UpperCAmelCase_ = input_ids.ne(1).to(_snake_case)
UpperCAmelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
UpperCAmelCase_ = OpenLlamaForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''')
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)])
def lowerCamelCase ( self : List[Any] , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = ids_tensor([1, 10] , config.vocab_size)
UpperCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ = OpenLlamaModel(_snake_case)
original_model.to(_snake_case)
original_model.eval()
UpperCAmelCase_ = original_model(_snake_case).last_hidden_state
UpperCAmelCase_ = original_model(_snake_case).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ = {'''type''': scaling_type, '''factor''': 1_0.0}
UpperCAmelCase_ = OpenLlamaModel(_snake_case)
scaled_model.to(_snake_case)
scaled_model.eval()
UpperCAmelCase_ = scaled_model(_snake_case).last_hidden_state
UpperCAmelCase_ = scaled_model(_snake_case).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-5))
else:
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1e-5))
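# --- Illustrative note (added; not part of the original tests) ---
# The scaling test above toggles rotary-embedding scaling through the config;
# a minimal sketch of the knob it exercises (values are the test's own):
#
# config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
# assert config.rope_scaling["factor"] == 10.0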
| 51
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    '''Step the scheduler `num_steps` times, recording the learning rate.'''
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    '''Like unwrap_schedule, but save/reload the scheduler state halfway through.'''
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        '''Element-wise approximate equality of two lists.'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(1_0_0 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1_0_0_0 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        '''Element-wise approximate equality of two lists, with optional message.'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        '''Check each schedule's LR trajectory, plus save/reload behaviour.'''
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 1_0}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
    """Pickleable callable wrapper around a schedule lambda."""
    def __init__( self , fn ):
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
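# --- Illustrative usage (added; not part of the original tests) ---
# A minimal sketch of the warmup schedules exercised above, on a throwaway
# parameter; values mirror the common_kwargs used in the test.
#
# import torch
# from transformers import AdamW, get_linear_schedule_with_warmup
#
# w = torch.nn.Parameter(torch.zeros(1))
# opt = AdamW([w], lr=10.0)
# sched = get_linear_schedule_with_warmup(opt, num_warmup_steps=2, num_training_steps=10)
# lrs = []
# for _ in range(10):
#     lrs.append(sched.get_last_lr()[0])
#     opt.step()
#     sched.step()
# print(lrs)  # ramps 0 -> 10 over the 2 warmup steps, then decays linearly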
| 311
| 0
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
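# --- Formula note (added; not in the original file) ---
# The kernel filled above is the real Gabor function; in LaTeX:
#     g(x, y) = \exp\!\left(-\frac{x'^2 + \gamma^2 y'^2}{2\sigma^2}\right)
#               \cos\!\left(2\pi \frac{x'}{\lambda} + \psi\right)
# with the rotated coordinates
#     x' = x\cos\theta + y\sin\theta,  y' = -x\sin\theta + y\cos\theta .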
| 52
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = "T5Config"
def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    '''Shift input ids one token to the right and prepend the decoder start token.'''
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxMTaModel(FlaxTaModel):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig
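# --- Worked example (added; not in the original file) ---
# With pad_token_id=0 and decoder_start_token_id=0, a row [5, 6, 7] becomes
# [0, 5, 6]: everything shifts right and the start token fills position 0;
# any -100 label markers are replaced by the pad id.
#
# import jax.numpy as jnp
# ids = jnp.array([[5, 6, 7], [8, -100, 9]])
# print(shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=0))
# # [[0 5 6]
# #  [0 8 0]]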
| 311
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict['input_ids'])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict['input_ids'])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
@require_flax
class FlaxBlenderbotSmallHeadTests(unittest.TestCase):
    """simple docstring"""

    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """simple docstring"""

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])

                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot_small-90M')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
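# Editor's sketch (hypothetical, not part of the original test file) of the cache
# equivalence the two `check_use_cache_forward*` helpers assert: decoding one token
# at a time through `init_cache`/`past_key_values` must match one full `decode` call.
#   enc = model.encode(input_ids)
#   cache = model.init_cache(batch_size, max_length, enc)
#   step = model.decode(decoder_ids[:, :1], enc, past_key_values=cache,
#                       decoder_position_ids=jnp.zeros((batch_size, 1), dtype='i4'))
#   # step.past_key_values then feeds the next single-token decode call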
| 53
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
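# Editor's worked example of the iterative branch above, using the doctest data from
# _KWARGS_DESCRIPTION: the two prediction/reference pairs contribute 1 and 3 word
# errors against 4 reference words each, so WER = (1 + 3) / (4 + 4) = 0.5.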
| 311
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
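# Editor's note (simplified sketch, not the real implementation): replacing
# sys.modules[__name__] with a _LazyModule defers each heavy import until an
# attribute is first accessed. The core idea in miniature:
#   import importlib, types
#   class _Lazy(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._map = {sym: mod for mod, syms in import_structure.items() for sym in syms}
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._map[attr], self.__name__)
#           return getattr(module, attr)
# (the real _LazyModule also handles submodules, __dir__, and pickling)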
| 54
|
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num):
    '''simple docstring'''
    return len(unique_prime_factors(num))


def equality(iterable):
    '''simple docstring'''
    return len(set(iterable)) in (0, 1)


def run(n):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n=4):
    '''simple docstring'''
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
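# Editor's check (well-known Project Euler 47 values, not part of the original):
#   run(2) -> [14, 15]          (14 = 2*7 and 15 = 3*5, each with 2 distinct factors)
#   run(3) -> [644, 645, 646]   (each has exactly 3 distinct prime factors)
# so solution() with the default n=4 returns the first of four such consecutive integers.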
| 311
| 0
|
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray, vector: np.ndarray, error_tol: float = 1e-12, max_iterations: int = 100,
):
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.

    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.

        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
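# Editor's minimal example (not in the original): for the symmetric matrix
# [[2, 1], [1, 2]] the eigenvalues are 1 and 3, so starting from any vector not
# orthogonal to the dominant eigenvector [1, 1]:
#   eigen_value, _ = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#   assert abs(eigen_value - 3) <= 1e-6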
| 55
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311
| 0
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'\s+')


def get_hash(example):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN, '', example['content']).encode('utf-8')).hexdigest()}


def line_stats(example):
    '''simple docstring'''
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example['hash'])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    '''simple docstring'''
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    '''simple docstring'''
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    '''simple docstring'''
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    '''simple docstring'''
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    '''simple docstring'''
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    '''simple docstring'''
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    '''simple docstring'''
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f'Time to load dataset: {time.time()-t_start:.2f}')

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'Time to preprocess dataset: {time.time()-t_start:.2f}')

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f'Fraction of duplicates: {1-frac:.2%}')

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'Time to filter dataset: {time.time()-t_start:.2f}')
print(f'Size of filtered dataset: {len(ds_filter)}')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(f'Size of deduplicate dataset: {len(ds_filter)}')

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'file-{file_number+1:012}.json')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)

print(f'Time to save dataset: {time.time()-t_start:.2f}')
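# Editor's illustration (hypothetical record, not part of the original script) of
# the heuristics above:
#   example = {"content": "# auto-generated file\nx = 1\n"}
#   is_autogenerated(example)     # -> {"autogenerated": True}, keyword on line 1
#   has_few_assignments(example)  # -> {"has_few_assignments": True}, one "=" <= 4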
| 56
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    '''simple docstring'''
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    '''simple docstring'''
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    '''simple docstring'''
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'.")
    return F"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return F"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
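# Editor's examples (the values follow directly from the helpers above):
#   camelcase_to_snakecase("SomeDatasetName")     # -> "some_dataset_name"
#   filename_prefix_for_split("squad", "train")   # -> "squad-train"
#   filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[10, 10])
#   # -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]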
| 311
| 0
|
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    '''simple docstring'''

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
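# Editor's usage sketch (assuming the class name restored above; this reader is
# what backs `datasets.Dataset.from_generator`):
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#   ds = GeneratorDatasetInputStream(generator=gen).read()
#   # len(ds) == 2 and ds[0] == {"text": "hello"}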
| 57
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 311
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents_reference=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        text_embeddings=None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.'
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f' {self.tokenizer.model_max_length} tokens: {removed_text}'
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='
                    f' {type(prompt)}.'
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
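# Editor's usage sketch (hypothetical identifiers; loading custom community
# pipelines this way is standard diffusers practice, but the exact pipeline id
# "seed_resize_stable_diffusion" is an assumption):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="seed_resize_stable_diffusion"
#   )
#   image = pipe("a photo of an astronaut", height=512, width=768).images[0]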
| 58
|
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    '''simple docstring'''
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    '''simple docstring'''
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    '''simple docstring'''
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(F"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
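# Editor's note on the expected input format (inferred from generate_neighbours and
# the single-character f.read(1) start node): one weighted edge per line,
# "node_a node_b distance", e.g. for a small instance saved as graph.txt:
#   a b 20
#   a c 18
#   b c 10
# invoked as:  python tabu_search.py -f graph.txt -i 100 -s 5
# (the script filename here is hypothetical)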
| 311
| 0
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__lowerCamelCase = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
__lowerCamelCase = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
__lowerCamelCase = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
__lowerCamelCase = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
__lowerCamelCase = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple ):
for tf_name, hf_name in patterns:
        snake_case : Union[str, Any] = k.replace(tf_name , hf_name )
return k
def UpperCamelCase ( __lowerCamelCase : dict , __lowerCamelCase : dict ):
snake_case : Dict = BigBirdPegasusConfig(**__lowerCamelCase )
snake_case : List[Any] = BigBirdPegasusForConditionalGeneration(__lowerCamelCase )
snake_case : List[Any] = torch_model.state_dict()
snake_case : Any = {}
# separating decoder weights
snake_case : Tuple = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
snake_case : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
        snake_case : Any = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(__lowerCamelCase ):
continue
snake_case : Union[str, Any] = DECODER_PATTERNS
snake_case : str = rename_state_dict_key(__lowerCamelCase , __lowerCamelCase )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
snake_case : Optional[Any] = v.T
snake_case : Optional[Any] = torch.from_numpy(__lowerCamelCase )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
        snake_case : Tuple = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(__lowerCamelCase ):
continue
snake_case : Dict = REMAINING_PATTERNS
snake_case : List[str] = rename_state_dict_key(__lowerCamelCase , __lowerCamelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
snake_case : str = v.T
snake_case : int = torch.from_numpy(__lowerCamelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
snake_case : Optional[Any] = mapping["model.embed_positions.weight"]
snake_case : List[str] = mapping.pop("model.embed_positions.weight" )
snake_case , snake_case : List[Any] = torch_model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
snake_case : Optional[int] = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Optional[Any] = tf.train.list_variables(__lowerCamelCase )
snake_case : List[str] = {}
snake_case : List[str] = ["global_step"]
for name, shape in tqdm(__lowerCamelCase , desc="converting tf checkpoint to dict" ):
snake_case : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case : Union[str, Any] = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase )
snake_case : Union[str, Any] = array
return tf_weights
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : dict ):
snake_case : str = get_tf_weights_as_numpy(__lowerCamelCase )
snake_case : Optional[Any] = convert_bigbird_pegasus(__lowerCamelCase , __lowerCamelCase )
torch_model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
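# Illustrative invocation (hypothetical script name and checkpoint path; the
# flags are the ones defined by the parser above):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path bigbird-pegasus/model.ckpt --save_dir ./bigbird-pegasus-hf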
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) != 32:
raise ValueError("Input must be of length 32" )
UpperCAmelCase : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:]
UpperCAmelCase : List[str] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : int = b""
for char in message:
        bit_string += format(char , "08b" ).encode("utf-8" )
UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__magic_name__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
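# Padding sketch for the routine above: an empty message becomes exactly one
# 512-bit block -- the mandatory b"1" bit, 447 b"0" padding bits, and the
# 64-bit little-endian length field (all zeros for an empty input).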
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__magic_name__ ) , 512 ):
UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512]
UpperCAmelCase : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Any = format(__magic_name__ , "032b" )
UpperCAmelCase : int = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__magic_name__ , 2 )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return (a + b) % 2**32
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
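# Worked example for the 32-bit left rotation above: rotating 0x80000000 left
# by 1 wraps the top bit around to give 0x00000001, and rotating 0x00000001 by
# 31 gives 0x80000000 back.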
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = preprocess(__magic_name__ )
UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
UpperCAmelCase : List[str] = 0X67452301
UpperCAmelCase : Tuple = 0XEFCDAB89
UpperCAmelCase : List[Any] = 0X98BADCFE
UpperCAmelCase : List[str] = 0X10325476
UpperCAmelCase : Dict = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__magic_name__ ):
UpperCAmelCase : Optional[Any] = aa
UpperCAmelCase : List[Any] = ba
UpperCAmelCase : Optional[Any] = ca
UpperCAmelCase : Any = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
UpperCAmelCase : Tuple = d ^ (b & (c ^ d))
UpperCAmelCase : List[str] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
UpperCAmelCase : int = c ^ (d & (b ^ c))
UpperCAmelCase : Tuple = (5 * i + 1) % 16
elif i <= 47:
UpperCAmelCase : Any = b ^ c ^ d
UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16
else:
UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ ))
UpperCAmelCase : Dict = (7 * i) % 16
UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
UpperCAmelCase : List[Any] = d
UpperCAmelCase : Any = c
UpperCAmelCase : Dict = b
UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) )
# Add hashed chunk to running total
UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ )
return digest
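# Known test vectors for the function above: the MD5 digest of the empty
# message is "d41d8cd98f00b204e9800998ecf8427e" and the digest of b"abc" is
# "900150983cd24fb0d6963f7d28e17f72".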
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def _snake_case ( _snake_case : int = 100 ):
lowerCAmelCase : Union[str, Any] = (n * (n + 1) // 2) ** 2
lowerCAmelCase : List[str] = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_a = None
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_a = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_a = {
'camembert-base': 512,
}
_a = '▁'
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Tuple = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CamembertTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=["<s>NOTUSED", "</s>NOTUSED"] , **lowercase_ , ):
"""simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ : Optional[int] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : int = vocab_file
UpperCAmelCase_ : Any = False if not self.vocab_file else True
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Tuple = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
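# Layout implied by the two methods above: a single sequence is encoded as
# "<s> A </s>" and a sequence pair as "<s> A </s></s> B </s>", with token type
# ids that are all zeros in both cases.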
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase : str = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        UpperCAmelCase : Optional[int] = orig_state_dict.pop(key )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
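# Illustrative invocation (hypothetical script name and paths; the flags are
# the ones defined by the parser above):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small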
from string import ascii_lowercase, ascii_uppercase
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
if not sentence:
return ""
    __UpperCamelCase =dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
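# Example behaviour of the mapping above: "hello world" -> "Hello world", while
# an input whose first character is already uppercase (or not a letter) is
# returned unchanged.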
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a : Tuple = logging.getLogger(__name__)
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Any = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase : List[Any] = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase : str = fp.readlines()
logger.info("Start encoding" )
logger.info(F"{len(__magic_name__ )} examples to process." )
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
UpperCAmelCase : Union[str, Any] = 1_0000
UpperCAmelCase : Union[str, Any] = time.time()
for text in data:
UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}"
        UpperCAmelCase : Tuple = tokenizer.encode(text , add_special_tokens=False )
rslt.append(__magic_name__ )
iter += 1
if iter % interval == 0:
UpperCAmelCase : Dict = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCAmelCase : Any = time.time()
logger.info("Finished binarization" )
logger.info(F"{len(__magic_name__ )} examples processed." )
UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCAmelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt]
else:
UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
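# Illustrative invocation (hypothetical script name; the flags and defaults are
# the ones defined by the parser above):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/dump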
'''simple docstring'''
import os
def _lowerCamelCase ( ) -> Tuple:
with open(os.path.dirname(lowercase ) + "/grid.txt" ) as f:
_a = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
_a = 0
# right
for i in range(20 ):
for j in range(17 ):
_a = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
_a = temp
# down
for i in range(17 ):
for j in range(20 ):
_a = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
_a = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
_a = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
_a = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
_a = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
_a = temp
return maximum
if __name__ == "__main__":
print(solution())
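# Note on the loop bounds above: each product spans four adjacent cells of the
# 20x20 grid, so the starting index along the constrained axis can be at most
# 16 -- hence range(17) -- and the down-left diagonal needs j >= 3.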
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a : Tuple = ["gpt2"]
a : Dict = "gpt2"
if is_tf_available():
class UpperCamelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__()
UpperCAmelCase : Tuple = tokenizer
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case )
UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case )
UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor()
UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A_ ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" )
UpperCAmelCase : Any = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCAmelCase : Dict = python_outputs[key].numpy()
UpperCAmelCase : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Optional[Any] = tf.function(snake_case )
for test_inputs in self.test_sentences:
UpperCAmelCase : List[str] = tf.constant(snake_case )
UpperCAmelCase : Dict = compiled_tokenizer(snake_case )
UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : int = ModelToSave(tokenizer=snake_case )
UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model"
tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} )
UpperCAmelCase : int = tf.saved_model.load(snake_case )
UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs
UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config()
UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case )
UpperCAmelCase : Tuple = model_from_config(snake_case )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
UpperCAmelCase : List[str] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case )
UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = defaultdict(__magic_name__ )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__magic_name__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def lowercase ( __magic_name__=False ):
'''simple docstring'''
with open(__magic_name__ , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
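# Illustrative invocation (hypothetical script name; the flag is the one
# defined by the parser above):
#   python check_doc_toc.py --fix_and_overwrite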
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = 'swinv2'
__UpperCAmelCase : str = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self : Optional[Any] , __UpperCAmelCase : str=2_2_4 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : List[str]=9_6 , __UpperCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __UpperCAmelCase : Tuple=[3, 6, 1_2, 2_4] , __UpperCAmelCase : Dict=7 , __UpperCAmelCase : Union[str, Any]=4.0 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : List[Any]=0.0 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : List[Any]=1E-5 , __UpperCAmelCase : List[str]=3_2 , **__UpperCAmelCase : List[str] , ) -> Tuple:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = len(__UpperCAmelCase )
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase__ = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
UpperCAmelCase__ = (0, 0, 0, 0)
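# Example of the derived attribute above: with the default embed_dim of 96 and
# four stages (len(depths) == 4), hidden_size = int(96 * 2 ** 3) = 768.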
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( __magic_name__ ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase : Any = False
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = plt.imshow(__magic_name__ )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
plt.show()
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = datetime.now()
UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp
"""simple docstring"""
def A_ ( _lowercase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowercase, _lowercase ):
raise ValueError("""Length must be a positive integer.""" )
return [n * (2 * n - 1) for n in range(_lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
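# The two calls above print [0, 1, 6, 15, 28] and
# [0, 1, 6, 15, 28, 45, 66, 91, 120, 153], following h_n = n * (2 * n - 1).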
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a : str = getLogger(__name__)
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = str(__magic_name__ )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ )
UpperCAmelCase : List[str] = Path(__magic_name__ )
UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(__magic_name__ )
UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda()
if fpaa:
UpperCAmelCase : int = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params
UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase : Optional[Any] = num_return_sequences
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase : Any = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase : Dict = SeqaSeqDataset(
__magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ )
UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn )
UpperCAmelCase : Any = []
for batch in tqdm(__magic_name__ ):
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , )
UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
UpperCAmelCase : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__magic_name__ ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(__magic_name__ , __magic_name__ )
return results, sampler.num_replicas
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ )
parser.add_argument(
"--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" )
parser.add_argument(
"--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument(
"--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase : Union[str, Any] = time.time()
UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args()
UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking.
UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase : Optional[Any] = {}
if args.src_lang is not None:
UpperCAmelCase : List[str] = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__magic_name__ )
UpperCAmelCase , UpperCAmelCase : str = eval_data_dir(
args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , )
if args.local_rank <= 0:
UpperCAmelCase : List[str] = Path(args.save_dir )
save_dir.mkdir(exist_ok=__magic_name__ )
UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout )
UpperCAmelCase : Dict = combine_partial_results(__magic_name__ )
if args.num_return_sequences > 1:
UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(__magic_name__ , __magic_name__ )
return
UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(__magic_name__ ) as f:
UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase : Optional[int] = "translation" in args.task
UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge"
UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = len(__magic_name__ )
UpperCAmelCase : Union[str, Any] = time.time() - start_time
UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ )
print(__magic_name__ )
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(__magic_name__ )
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for partial_result in partial_results:
        records.extend(partial_result )
    UpperCAmelCase : Optional[Any] = sorted(records , key=lambda x : x["id"] )
UpperCAmelCase : List[Any] = [x["pred"] for x in records]
return preds
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) )
if len(__magic_name__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
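# Illustrative multi-GPU invocation (hypothetical script name and paths; the
# flags are the ones defined by the parser above, with --local_rank supplied by
# the launcher):
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir gens --bs 8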
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__UpperCAmelCase ={
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = ["model.decoder.embed_positions.weights"]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = list(state_dict.keys() )
UpperCAmelCase : List[Any] = {}
for key in keys:
        UpperCAmelCase : Any = state_dict.pop(key )
        UpperCAmelCase : str = rename_keys(key )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Optional[int] = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : str = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
def lowercase ( __magic_name__ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1024
UpperCAmelCase : Tuple = 24
UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : List[Any] = 1536
UpperCAmelCase : Optional[Any] = 48
UpperCAmelCase : List[str] = 24
elif checkpoint == "large":
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : str = 48
UpperCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
UpperCAmelCase : Tuple = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ )
UpperCAmelCase : Dict = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict(
__magic_name__ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" )
UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
if len(__magic_name__ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__magic_name__ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__magic_name__ )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" )
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
# set the appropriate bos/pad token ids
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : Tuple = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__magic_name__ )
processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
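    # Usage sketch (added; hedged: needs audiocraft's MusicGen plus network access,
    # and the script file name below is illustrative, not taken from this file):
    #
    #   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small
    #
    # which is equivalent to calling:
    #
    #   convert_musicgen_checkpoint("small", pytorch_dump_folder="./musicgen-small", device="cpu")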
| 311
| 0
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = """ybelkada/fonts"""
def _check_torch_version( ) -> Tuple:
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
"Pix2StructImageProcessor. Please upgrade torch." )
def torch_extract_patches( image_tensor: Optional[int] , patch_height: Dict , patch_width: int ) -> Tuple:
    '''simple docstring'''
    requires_backends(torch_extract_patches , ["torch"] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
def render_text( text: str , text_size: int = 3_6 , text_color: str = "black" , background_color: str = "white" , left_padding: int = 5 , right_padding: int = 5 , top_padding: int = 5 , bottom_padding: int = 5 , font_bytes: Optional[bytes] = None , font_path: Optional[str] = None , ) -> Image.Image:
    '''simple docstring'''
    requires_backends(render_text , "vision" )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=8_0 )
    lines = wrapper.wrap(text=text )
    wrapped_text = "\n".join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , "Arial.TTF" )
    font = ImageFont.truetype(font , encoding="UTF-8" , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB" , (1, 1) , background_color ) )
    _ , _ , text_width , text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB" , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
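# Usage sketch (added; hedged: the first call downloads the default Arial font from
# the `ybelkada/fonts` hub repo, so it needs network access):
#
#   header = render_text("What is the total revenue?", text_size=3_6)
#   header.size  # (width, height) of the rendered, 80-column-wrapped text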
def render_header( image: np.ndarray , header: str , **kwargs: Union[str, Any] ) -> Dict:
    '''simple docstring'''
    requires_backends(render_header , "vision" )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class a__ ( BaseImageProcessor ):
"""simple docstring"""
__lowerCamelCase = ['flattened_patches']
    def __init__( self , do_convert_rgb = True , do_normalize = True , patch_size = None , max_patches = 2048 , is_vqa = False , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches( self , image , max_patches , patch_size , **kwargs ) -> np.ndarray:
        '''simple docstring'''
        requires_backends(self.extract_flattened_patches , "torch" )
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image , ChannelDimension.FIRST )
        image = torch.from_numpy(image )
        patch_height , patch_width = patch_size["height"], patch_size["width"]
        image_height , image_width = get_image_size(image )
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
        resized_height = max(num_feasible_rows * patch_height , 1 )
        resized_width = max(num_feasible_cols * patch_width , 1 )
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=False , antialias=True , ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image , patch_height , patch_width )
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
        col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()
        result = to_numpy_array(result )
        return result
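    # Worked example (added for clarity, assuming the defaults): for a 512x512 RGB image
    # with 16x16 patches and max_patches=2048,
    #   scale = sqrt(2048 * (16/512) * (16/512)) = sqrt(2) ~ 1.414
    #   num_feasible_rows = num_feasible_cols = floor(1.414 * 512 / 16) = 45
    # so 45 * 45 = 2025 patches are extracted and padded up to 2048 rows, each of
    # length 2 + 16 * 16 * 3 = 770 (row id, column id, then the flattened pixels).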
    def normalize( self , image , data_format = None , **kwargs ) -> np.ndarray:
        '''simple docstring'''
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )
    def preprocess( self , images , header_text = None , do_convert_rgb = None , do_normalize = None , max_patches = None , patch_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> ImageInput:
        '''simple docstring'''
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("data_format" , None ) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are " )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models." )
            font_bytes = kwargs.pop("font_bytes" , None )
            font_path = kwargs.pop("font_path" , None )
            if isinstance(header_text , str ):
                header_text = [header_text] * len(images )
            images = [
                render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
                for i, image in enumerate(images )
            ]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=return_tensors )
        return encoded_outputs
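# Usage sketch (added; hedged: the class keeps this file's obfuscated name `a__`,
# which corresponds to Pix2StructImageProcessor upstream):
#
#   from PIL import Image
#   image_processor = a__(max_patches=2048)
#   encoding = image_processor.preprocess(images=Image.open("chart.png"), return_tensors="np")
#   # encoding["flattened_patches"].shape == (1, 2048, 770)  # 770 = 2 + 16 * 16 * 3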
| 68
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
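# Illustration (added; hedged): with 2 processes, rank 0 builds a (2, 10) tensor and
# rank 1 a (3, 10) tensor, so pad_across_processes must return (3, 10) on both ranks,
# i.e. num_processes + 1 rows. The checks above verify both the original values and
# the zero padding, for trailing (default) and leading (pad_first=True) padding.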
| 311
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=32, lowerCAmelCase__=3, lowerCAmelCase__=4, lowerCAmelCase__=[10, 20, 30, 40], lowerCAmelCase__=[2, 2, 3, 2], lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=37, lowerCAmelCase__="gelu", lowerCAmelCase__=10, lowerCAmelCase__=0.02, lowerCAmelCase__=["stage2", "stage3", "stage4"], lowerCAmelCase__=3, lowerCAmelCase__=None, ) -> str:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_stages
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = out_features
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = num_stages
def a_ ( self) -> List[Any]:
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size], self.type_sequence_label_size)
snake_case_ = self.get_config()
return config, pixel_values, labels
def a_ ( self) -> Dict:
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def a_ ( self) -> Dict:
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCAmelCase__, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCAmelCase__, loss_ignore_index=255, num_labels=self.num_labels, )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = UperNetForSemanticSegmentation(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
def a_ ( self) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> List[str]:
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
def a_ ( self) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a_ ( self) -> Optional[Any]:
return
def a_ ( self) -> Tuple:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase__)
@unittest.skip(reason='UperNet does not use inputs_embeds')
def a_ ( self) -> List[str]:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings')
def a_ ( self) -> List[Any]:
pass
@unittest.skip(reason='UperNet does not have a base model')
def a_ ( self) -> List[Any]:
pass
@unittest.skip(reason='UperNet does not have a base model')
def a_ ( self) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def a_ ( self) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def a_ ( self) -> Optional[Any]:
pass
def a_ ( self) -> Any:
def check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__), expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(lowerCAmelCase__)
snake_case_ = _config_zero_init(configs_no_init.backbone_config)
for model_class in self.all_model_classes:
snake_case_ = model_class(config=lowerCAmelCase__)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@unittest.skip(reason='UperNet does not have tied weights')
def a_ ( self) -> Any:
pass
@slow
def a_ ( self) -> List[str]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = UperNetForSemanticSegmentation.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img( ):
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
    image = Image.open(filepath ).convert('RGB' )
    return image
@require_torch
@require_vision
@slow
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> int:
snake_case_ = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny')
snake_case_ = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny').to(lowerCAmelCase__)
snake_case_ = prepare_img()
snake_case_ = processor(images=lowerCAmelCase__, return_tensors='pt').to(lowerCAmelCase__)
with torch.no_grad():
snake_case_ = model(**lowerCAmelCase__)
snake_case_ = torch.Size((1, model.config.num_labels, 512, 512))
self.assertEqual(outputs.logits.shape, lowerCAmelCase__)
snake_case_ = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCAmelCase__, atol=1e-4))
def a_ ( self) -> List[str]:
snake_case_ = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
snake_case_ = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny').to(lowerCAmelCase__)
snake_case_ = prepare_img()
snake_case_ = processor(images=lowerCAmelCase__, return_tensors='pt').to(lowerCAmelCase__)
with torch.no_grad():
snake_case_ = model(**lowerCAmelCase__)
snake_case_ = torch.Size((1, model.config.num_labels, 512, 512))
self.assertEqual(outputs.logits.shape, lowerCAmelCase__)
snake_case_ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCAmelCase__, atol=1e-4))
| 69
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
        object_detector = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        examples = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    "score": ANY(float ),
                    "label": ANY(str ),
                    "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                }
                for i in range(n )
            ] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
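# Usage sketch (added; hedged, mirroring the calls exercised in the tests above):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote", "couch"],
#       threshold=0.2,
#       top_k=3,
#   )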
| 311
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
A__ : Union[str, Any] =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCAmelCase ( PreTrainedTokenizer ):
_lowercase: int = VOCAB_FILES_NAMES
_lowercase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase: Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase: str = ['''input_ids''', '''attention_mask''']
_lowercase: List[int] = []
_lowercase: List[int] = []
def __init__( self : int , __snake_case : Optional[Any] , __snake_case : Dict="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : Dict="</s>" , __snake_case : str="<s>" , __snake_case : Optional[int]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Union[str, Any]="<mask>" , __snake_case : List[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Optional[Dict[str, Any]] = None , __snake_case : str=None , __snake_case : str=False , **__snake_case : List[Any] , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase = legacy_behaviour
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , tokenizer_file=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__snake_case , **__snake_case , )
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
_lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase = 1
_lowerCAmelCase = len(self.sp_model )
_lowerCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case )
}
_lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn"""
_lowerCAmelCase = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ) -> List[str]:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
_lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , __snake_case : Optional[Any] ) -> Dict:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowercase__ ( self : List[Any] ) -> Any:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase__ ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def lowercase__ ( self : Dict , __snake_case : str ) -> None:
_lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
_lowerCAmelCase = [1] * len(self.prefix_tokens )
_lowerCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Optional[int] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase = src_lang
_lowerCAmelCase = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
_lowerCAmelCase = self.convert_tokens_to_ids(__snake_case )
_lowerCAmelCase = tgt_lang_id
return inputs
def lowercase__ ( self : List[Any] ) -> Optional[int]:
_lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : Optional[int] , __snake_case : str ) -> List[str]:
        return self.sp_model.encode(__snake_case , out_type=str )
def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase = self.sp_model.PieceToId(__snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : List[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] ) -> str:
        out_string = """""".join(__snake_case ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
def lowercase__ ( self : str , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
def lowercase__ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : str = "eng_Latn" , __snake_case : Optional[List[str]] = None , __snake_case : str = "fra_Latn" , **__snake_case : Optional[int] , ) -> BatchEncoding:
_lowerCAmelCase = src_lang
_lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def lowercase__ ( self : str ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : Dict ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase__ ( self : str , __snake_case : int ) -> None:
_lowerCAmelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
def lowercase__ ( self : Any , __snake_case : str ) -> None:
_lowerCAmelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
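# Usage sketch (added; hedged: this class corresponds to the upstream NllbTokenizer,
# so the equivalent public API would be):
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # In legacy mode the source language code follows </s> as a suffix token;
#   # otherwise it prefixes the sequence, per set_src_lang_special_tokens above.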
| 70
|
'''simple docstring'''
def twos_complement( number ):
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 0
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main() -> Any:
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
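# Usage sketch (added; hedged, and the file name below is illustrative): the script
# only needs a GitHub token in the environment, e.g.
#
#   GITHUB_TOKEN=<token> python stale.py
#
# In CI it would typically run on a daily schedule rather than by hand.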
| 71
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance( point_a , point_b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(point_a ) - np.array(point_b ) )
def classifier( train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
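    # Note (added; hedged): the split above is random, but the query point
    # [4.4, 3.1, 1.3, 1.4] lies much closer to the Iris-setosa cluster than to the
    # other two classes, so with the default k=5 the printed label is typically "setosa".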
| 311
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_clip_fast'''] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_clip'''] = ['''CLIPFeatureExtractor''']
    _import_structure['''image_processing_clip'''] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clip'''] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_clip'''] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_clip'''] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
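    # Note (added; hedged): the _LazyModule registered above defers the heavy framework
    # imports, so e.g. `from transformers import CLIPModel` only loads the torch
    # implementation the first time the attribute is actually accessed.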
| 72
|
'''simple docstring'''
def is_power_of_two( number ):
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
| 0
|
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=DummyObject ):
_UpperCAmelCase : Union[str, Any] = ['''torch''', '''torchsde''']
def __init__( self : Union[str, Any] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
requires_backends(self ,['torch', 'torchsde'])
@classmethod
def lowerCAmelCase ( cls : Dict ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
requires_backends(cls ,['torch', 'torchsde'])
@classmethod
def lowerCAmelCase ( cls : Any ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : List[Any]):
requires_backends(cls ,['torch', 'torchsde'])
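# Behavior sketch (added; hedged): with DummyObject as the metaclass, instantiating
# or calling any classmethod on this placeholder without `torch` and `torchsde`
# installed raises an ImportError from requires_backends, e.g.:
#
#   A_()  # ImportError asking to install torch and torchsde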
| 73
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case )
def A_ ( self ):
'''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(1_0_0 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A_ ( self ):
'''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1_0_0_0 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    """simple docstring"""
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        '''simple docstring'''
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 1_0}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_a = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_a , expected_learning_rates , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_b = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_a , lrs_b , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
    """simple docstring"""
    def __init__( self , fn ):
        '''simple docstring'''
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        '''simple docstring'''
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
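# A small standalone sketch, not from the original module: it reproduces the
# expected learning-rate table tested above for get_linear_schedule_with_warmup
# (base lr 10.0, 2 warmup steps, 10 training steps), so the magic numbers in
# `scheds` can be checked by hand.
def expected_linear_schedule(base_lr, num_warmup_steps, num_training_steps):
    # Linear ramp 0 -> base_lr during warmup, then linear decay back to 0.
    lrs = []
    for step in range(num_training_steps):
        if step < num_warmup_steps:
            lrs.append(base_lr * step / num_warmup_steps)
        else:
            lrs.append(base_lr * (num_training_steps - step) / (num_training_steps - num_warmup_steps))
    return lrs
assert expected_linear_schedule(10.0, 2, 10) == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]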
| 311
| 0
|
"""simple docstring"""
from manim import *
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = Rectangle(height=0.5 ,width=0.5 )
A = Rectangle(height=0.25 ,width=0.25 )
A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
A = [mem.copy() for i in range(6 )]
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(A_ ,A_ ).arrange(A_ ,buff=0 )
A = Text('CPU' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
A = [mem.copy() for i in range(4 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('GPU' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('Model' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
A = []
A = []
A = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
A = Rectangle(height=0.46 / 4 ,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] ,direction=A_ ,buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] ,direction=A_ ,buff=0.0 )
self.add(A_ )
model_cpu_arr.append(A_ )
self.add(*A_ ,*A_ ,*A_ )
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('Loaded Checkpoint' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(A_ )
A = []
A = []
for i, rect in enumerate(A_ ):
A = fill.copy().set_fill(A_ ,opacity=0.7 )
target.move_to(A_ )
ckpt_arr.append(A_ )
A = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(A_ )
self.add(*A_ ,*A_ )
A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(A_ ,A_ )
A = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(A_ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(A_ )
A = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
A = [meta_mem.copy() for i in range(6 )]
A = [meta_mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(A_ ,A_ ).arrange(A_ ,buff=0 )
A = Text('Disk' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(A_ ,run_time=3 ) ,Write(A_ ,run_time=1 ) ,Create(A_ ,run_time=1 ) )
A = []
for i, rect in enumerate(A_ ):
A = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(A_ ,run_time=1.5 ) )
self.play(*A_ )
self.play(FadeOut(A_ ) )
A = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ,run_time=3 ) )
self.play(
FadeOut(A_ ,A_ ,*A_ ,*A_ ) ,)
self.wait()
| 74
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = "T5Config"
def shift_tokens_right( input_ids , pad_token_id , decoder_start_token_id ):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class FlaxMTaModel( FlaxTaModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel( FlaxTaEncoderModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration( FlaxTaForConditionalGeneration ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
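# A minimal sketch, not from the original module, of what shift_tokens_right
# produces: the decoder input is the label sequence shifted right by one, with
# the decoder start token prepended and any -100 placeholders replaced by the
# pad token.
_labels = jnp.array([[5, 6, 7, -100]])
_shifted = shift_tokens_right(_labels, pad_token_id=0, decoder_start_token_id=2)
# _shifted is [[2, 5, 6, 7]]: start token first, the trailing -100 is dropped.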
| 311
| 0
|
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data( subreddit : str , limit : int = 1 , age : str = "new" , wanted_data : list | None = None ) -> dict:
    """simple docstring"""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = F'''Invalid search term: {invalid_search_terms}'''
        raise ValueError(msg )
    response = requests.get(
        F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={'''User-agent''': '''A random string'''} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
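# A hedged convenience sketch, not from the original module: retry on the HTTP
# 429 the function above raises when Reddit rate-limits. The back-off interval
# is an assumption, not something the endpoint documents.
import time
def get_subreddit_data_with_retry(subreddit: str, retries: int = 3, wait_seconds: float = 10.0, **kwargs) -> dict:
    for attempt in range(retries):
        try:
            return get_subreddit_data(subreddit, **kwargs)
        except requests.HTTPError:
            # Back off a little longer after each rate-limited attempt.
            time.sleep(wait_seconds * (attempt + 1))
    raise requests.HTTPError("still rate limited after retries")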
| 75
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
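# A worked check of the formula in the description above, not from the
# original module. For the docstring example the first pair has 1
# substitution over 4 reference words; the second has 2 substitutions and 1
# insertion over 4 reference words, so S=3, D=0, I=1, C=5 and
# WER = (S + D + I) / (S + D + C) = 4 / 8.
_s, _d, _i, _c = 3, 0, 1, 5
assert (_s + _d + _i) / (_s + _d + _c) == 0.5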
| 311
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    '''simple docstring'''
    unet : UNetaDModel
    scheduler : ScoreSdeVeScheduler
    def __init__( self , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 2000 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
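# A usage sketch for the pipeline above, kept as comments because running it
# downloads weights. The checkpoint name is a hypothetical placeholder; any
# repository holding a UNet2DModel plus a ScoreSdeVeScheduler should fit this
# pattern.
#
#   pipe = ScoreSdeVePipeline.from_pretrained("some-org/some-score-sde-ve-checkpoint")
#   pipe = pipe.to("cuda")
#   image = pipe(batch_size=1, num_inference_steps=2000).images[0]
#   image.save("sample.png")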
| 76
|
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors( n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )
def equality( iterable ):
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)
def run( n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
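# Sanity checks for the helpers above, not from the original module: 14 = 2*7
# and 15 = 3*5 form the first pair of consecutive integers with two distinct
# prime factors each (the n=2 case of Project Euler 47).
assert unique_prime_factors(14) == {2, 7}
assert unique_prime_factors(15) == {3, 5}
assert run(2) == [14, 15]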
| 311
| 0
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : Any = 16
_UpperCamelCase : Tuple = 32
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 , model_name_or_path : str = "bert-base-cased" ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop( accelerator , model , eval_dataloader , metric ):
    '''simple docstring'''
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function( config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue' , 'mrpc' )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split('epoch_' )[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print('resumed checkpoint performance:' , accuracy )
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
        with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , 'r' ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]['lr']
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"""epoch {epoch}:""" , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , 'w' ) as f:
                json.dump(state , f )
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--resume_from_checkpoint' , type=str , default=None , help='If the training should continue from a checkpoint folder.' , )
    parser.add_argument(
        '--partial_train_epoch' , type=int , default=None , help='If passed, the training will stop after this number of epochs.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=2 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
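# A standalone sketch, not from the original script, of the resume step above:
# recovering the epoch number from a checkpoint folder name such as "epoch_12".
def epoch_from_checkpoint_name(name: str) -> int:
    digits = ""
    for char in name.split("epoch_")[1]:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits)
assert epoch_from_checkpoint_name("output/epoch_12") == 12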
| 77
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
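# A usage sketch, kept as comments: with the lazy structure above, importing
# the package stays cheap, and only attribute access triggers the real import
# via _LazyModule. The checkpoint below is the public 24 kHz EnCodec model.
#
#   from transformers import EncodecModel
#   model = EncodecModel.from_pretrained("facebook/encodec_24khz")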
| 311
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
snake_case_ = logging.get_logger(__name__)
def _lowerCAmelCase ( lowercase_ ):
if isinstance(lowercase_ , np.ndarray ):
return list(tensor.shape )
UpperCAmelCase = tf.shape(lowercase_ )
if tensor.shape == tf.TensorShape(lowercase_ ):
return dynamic
UpperCAmelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(lowercase_ )]
def _lowerCAmelCase ( lowercase_ , lowercase_ = None , lowercase_ = None ):
return tf.nn.softmax(logits=logits + 1e-9 , axis=lowercase_ , name=lowercase_ )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=1e-5 , lowercase_=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowercase_ , lowercase_ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
UpperCAmelCase , UpperCAmelCase = tf.nn.moments(lowercase_ , axes=[axis] , keepdims=lowercase_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
UpperCAmelCase = [1] * inputs.shape.rank
UpperCAmelCase = shape_list(lowercase_ )[axis]
UpperCAmelCase = tf.reshape(lowercase_ , lowercase_ )
UpperCAmelCase = tf.reshape(lowercase_ , lowercase_ )
# Compute layer normalization using the batch_normalization
# function.
UpperCAmelCase = tf.nn.batch_normalization(
lowercase_ , lowercase_ , lowercase_ , offset=lowercase_ , scale=lowercase_ , variance_epsilon=lowercase_ , )
return outputs
def _lowerCAmelCase ( lowercase_ , lowercase_=0 , lowercase_=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
UpperCAmelCase = tf.shape(lowercase_ )
UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowercase_ , lowercase_ )
def _lowerCAmelCase ( lowercase_ ):
if not isinstance(lowercase_ , tf.Tensor ):
UpperCAmelCase = tf.convert_to_tensor(lowercase_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
UpperCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
UpperCAmelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
UpperCAmelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ = "input_ids" ):
tf.debugging.assert_less(
lowercase_ , tf.cast(lowercase_ , dtype=tensor.dtype ) , message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(lowercase_ )}) must be smaller than the embedding """
F"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
UpperCAmelCase = [x for x in data if len(lowercase_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
F"""bytes: {bad_attributes}""" )
UpperCAmelCase = np.asarray(lowercase_ )
UpperCAmelCase = 1
UpperCAmelCase = np.array_split(lowercase_ , lowercase_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
UpperCAmelCase = np.array_split(lowercase_ , lowercase_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowercase_ ):
UpperCAmelCase = chunk_data
else:
UpperCAmelCase = data
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if name in group.attrs:
UpperCAmelCase = [n.decode('utf8' ) if hasattr(lowercase_ , 'decode' ) else n for n in group.attrs[name]]
else:
UpperCAmelCase = []
UpperCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(lowercase_ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def _lowerCAmelCase ( lowercase_ ):
def _expand_single_ad_tensor(lowercase_ ):
if isinstance(lowercase_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(lowercase_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , lowercase_ )
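# A quick self-check of the torch.flatten-style helper defined above, not from
# the original module; the name flatten_tf is ours, restating the logic so the
# shape arithmetic can be verified in isolation.
def flatten_tf(tensor, start_dim=0, end_dim=-1):
    if end_dim < 0:
        end_dim += tensor.shape.rank
    if start_dim < 0:
        start_dim += tensor.shape.rank
    if start_dim == end_dim:
        return tensor
    in_shape = tf.shape(tensor)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(tensor, out_shape)
# Flattening dims 1..2 of a (2, 3, 4) tensor yields (2, 12), like torch.flatten(x, 1).
assert tuple(flatten_tf(tf.zeros((2, 3, 4)), start_dim=1).shape) == (2, 12)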
| 78
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(R"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(R"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(R"(_{2,})")
_split_re = R"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = R"<>:/\|?*"
def camelcase_to_snakecase( name ):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(R"\1_\2" , name )
    name = _lowercase_uppercase_re.sub(R"\1_\2" , name )
    return name.lower()
def snakecase_to_camelcase( name ):
    '''simple docstring'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )
def filename_prefix_for_name( name ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split( name , split ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , split ):
        raise ValueError(F"Split name should match '{_split_re}' but got '{split}'." )
    return F"{filename_prefix_for_name(name )}-{split}"
def filepattern_for_dataset_split( dataset_name , split , data_dir , filetype_suffix=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(data_dir , prefix )
    return F"{filepath}*"
def filenames_for_dataset_split( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
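# Worked examples for the naming helpers above, not from the original module
# (POSIX-style paths assumed for os.path.join):
assert camelcase_to_snakecase("SomeDatasetName") == "some_dataset_name"
assert filename_prefix_for_split("SomeDatasetName", "train") == "some_dataset_name-train"
assert filenames_for_dataset_split(
    "/data", "SomeDatasetName", "train", filetype_suffix="arrow", shard_lengths=[10, 10]
) == [
    "/data/some_dataset_name-train-00000-of-00002.arrow",
    "/data/some_dataset_name-train-00001-of-00002.arrow",
]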
| 311
| 0
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system( moles , kelvin , volume ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles , kelvin , pressure ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
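# A worked example, not from the original module: one mole at 273.15 K in
# 0.0224 m^3 should sit near one atmosphere, since
# P = nRT / V = 1 * 8.314462 * 273.15 / 0.0224, which is about 1.014e5 Pa.
assert 1.00e5 < pressure_of_gas_system(1.0, 273.15, 0.0224) < 1.03e5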
| 79
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a : Optional[int] = _symbol_database.Default()
a : Any = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
a : Tuple = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
a : str = None
a : Optional[Any] = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
a : str = 45
a : Any = 15_81
a : List[Any] = 15_17
a : Union[str, Any] = 15_70
a : Optional[Any] = 15_84
a : List[str] = 17_93
a : Optional[Any] = 17_95
a : Tuple = 19_16
a : Optional[Any] = 18_64
a : int = 19_05
a : Optional[Any] = 19_19
a : Union[str, Any] = 24_29
a : List[Any] = 22_08
a : Dict = 24_18
a : Optional[int] = 23_23
a : str = 24_07
# @@protoc_insertion_point(module_scope)
| 311
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig( PretrainedConfig ):
    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , backbone_config=None , num_queries=9_00 , max_position_embeddings=20_48 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=10_24 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=3_00 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
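# A minimal construction sketch for the config above, not from the original
# module; all arguments fall back to the defaults in __init__ (default ResNet
# backbone, 900 queries, two-stage decoding).
_config = DetaConfig()
assert _config.num_queries == 9_00
assert _config.hidden_size == _config.d_model == 2_56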
| 80
|
'''simple docstring'''
import argparse
import copy
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = {}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
UpperCAmelCase : List[Any] = []
_list.append([line.split()[1], line.split()[2]] )
UpperCAmelCase : Tuple = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
UpperCAmelCase : Any = []
_list.append([line.split()[0], line.split()[2]] )
UpperCAmelCase : int = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
with open(__magic_name__ ) as f:
UpperCAmelCase : List[str] = f.read(1 )
UpperCAmelCase : List[Any] = start_node
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Any = start_node
UpperCAmelCase : Optional[Any] = 0
while visiting not in first_solution:
UpperCAmelCase : Optional[Any] = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
UpperCAmelCase : Tuple = k[1]
UpperCAmelCase : Dict = k[0]
first_solution.append(__magic_name__ )
UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ )
UpperCAmelCase : str = best_node
first_solution.append(__magic_name__ )
UpperCAmelCase : int = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
UpperCAmelCase : str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = []
for n in solution[1:-1]:
UpperCAmelCase : Any = solution.index(__magic_name__ )
for kn in solution[1:-1]:
UpperCAmelCase : Dict = solution.index(__magic_name__ )
if n == kn:
continue
UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ )
UpperCAmelCase : Optional[int] = kn
UpperCAmelCase : List[str] = n
UpperCAmelCase : str = 0
for k in _tmp[:-1]:
UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
UpperCAmelCase : List[Any] = distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : List[str] = first_solution
UpperCAmelCase : str = []
UpperCAmelCase : Union[str, Any] = distance_of_first_solution
UpperCAmelCase : Union[str, Any] = solution
while count <= iters:
UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = 0
UpperCAmelCase : List[str] = neighborhood[index_of_best_solution]
UpperCAmelCase : Dict = len(__magic_name__ ) - 1
UpperCAmelCase : Dict = False
while not found:
UpperCAmelCase : List[Any] = 0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
UpperCAmelCase : int = best_solution[i]
UpperCAmelCase : Optional[int] = solution[i]
break
UpperCAmelCase : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[Any] = best_solution[:-1]
UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
UpperCAmelCase : Union[str, Any] = cost
UpperCAmelCase : Tuple = solution
else:
UpperCAmelCase : Optional[Any] = index_of_best_solution + 1
UpperCAmelCase : str = neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
UpperCAmelCase : int = count + 1
return best_solution_ever, best_cost
def lowercase ( __magic_name__=None ):
'''simple docstring'''
UpperCAmelCase : Dict = generate_neighbours(args.File )
UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution(
args.File , __magic_name__ )
UpperCAmelCase , UpperCAmelCase : Any = tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
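# Input-format sketch, not from the original script: the parser above expects
# one whitespace-separated edge per line, "<node> <node> <distance>", with
# single-character node names (generate_first_solution reads the start node
# with f.read(1)). A valid file and invocation (names are illustrative):
#
#   a b 20
#   a c 18
#   b c 10
#
#   python tabu_search.py -f distances.txt -i 100 -s 5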
| 311
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __A ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a ={
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
a ={
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__A ) , __A )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__A ) , x.transpose() ) )
a =np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =np.random.randn(3 , 4 )
a =torch.tensor(__A )
self.assertTrue(np.allclose(transpose(__A ) , transpose(__A ).numpy() ) )
a =np.random.randn(3 , 4 , 5 )
a =torch.tensor(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , transpose(__A , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =np.random.randn(3 , 4 )
a =tf.constant(__A )
self.assertTrue(np.allclose(transpose(__A ) , transpose(__A ).numpy() ) )
a =np.random.randn(3 , 4 , 5 )
a =tf.constant(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , transpose(__A , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =np.random.randn(3 , 4 )
a =jnp.array(__A )
self.assertTrue(np.allclose(transpose(__A ) , np.asarray(transpose(__A ) ) ) )
a =np.random.randn(3 , 4 , 5 )
a =jnp.array(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , np.asarray(transpose(__A , axes=(1, 2, 0) ) ) ) )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , np.reshape(__A , (4, 3) ) ) )
a =np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , np.reshape(__A , (12, 5) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =np.random.randn(3 , 4 )
a =torch.tensor(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , reshape(__A , (4, 3) ).numpy() ) )
a =np.random.randn(3 , 4 , 5 )
a =torch.tensor(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , reshape(__A , (12, 5) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =np.random.randn(3 , 4 )
a =tf.constant(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , reshape(__A , (4, 3) ).numpy() ) )
a =np.random.randn(3 , 4 , 5 )
a =tf.constant(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , reshape(__A , (12, 5) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =np.random.randn(3 , 4 )
a =jnp.array(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , np.asarray(reshape(__A , (4, 3) ) ) ) )
a =np.random.randn(3 , 4 , 5 )
a =jnp.array(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , np.asarray(reshape(__A , (12, 5) ) ) ) )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__A ) , np.squeeze(__A ) ) )
a =np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , np.squeeze(__A , axis=2 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =np.random.randn(1 , 3 , 4 )
a =torch.tensor(__A )
self.assertTrue(np.allclose(squeeze(__A ) , squeeze(__A ).numpy() ) )
a =np.random.randn(1 , 4 , 1 , 5 )
a =torch.tensor(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , squeeze(__A , axis=2 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =np.random.randn(1 , 3 , 4 )
a =tf.constant(__A )
self.assertTrue(np.allclose(squeeze(__A ) , squeeze(__A ).numpy() ) )
a =np.random.randn(1 , 4 , 1 , 5 )
a =tf.constant(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , squeeze(__A , axis=2 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =np.random.randn(1 , 3 , 4 )
a =jnp.array(__A )
self.assertTrue(np.allclose(squeeze(__A ) , np.asarray(squeeze(__A ) ) ) )
a =np.random.randn(1 , 4 , 1 , 5 )
a =jnp.array(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , np.asarray(squeeze(__A , axis=2 ) ) ) )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , np.expand_dims(__A , axis=1 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =np.random.randn(3 , 4 )
a =torch.tensor(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , expand_dims(__A , axis=1 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =np.random.randn(3 , 4 )
a =tf.constant(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , expand_dims(__A , axis=1 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =np.random.randn(3 , 4 )
a =jnp.array(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , np.asarray(expand_dims(__A , axis=1 ) ) ) )
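# A numpy-only sketch, not from the original test file, of what these tests pin
# down: the transformers.utils tensor ops dispatch on the input's framework and
# mirror the native semantics. Uses the imports already at the top of the file.
_x = np.arange(12).reshape(3, 4)
assert transpose(_x).shape == (4, 3)
assert reshape(_x, (2, 6)).shape == (2, 6)
assert squeeze(np.zeros((1, 3, 4))).shape == (3, 4)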
| 81
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) != 32:
raise ValueError("Input must be of length 32" )
UpperCAmelCase : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:]
UpperCAmelCase : List[str] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def preprocess(message):
    '''Convert a bytes message to a padded bit string in MD5's 512-bit block format.'''
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string):
    '''Yield each 512-bit block as a list of sixteen 32-bit words.'''
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i):
    '''Bitwise NOT of a 32-bit integer.'''
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a, b):
    '''Add two numbers modulo 2**32.'''
    return (a + b) % 2**32
def left_rotate_aa(i, shift):
    '''Rotate a 32-bit integer left by `shift` bits.'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message):
    '''Return the hex MD5 digest of `message` (bytes).

    The function name is a descriptive reconstruction; nothing else in this
    module references it, so rename freely.'''
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    # Per-round left-rotation amounts, 16 per round
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
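# Quick sanity check for the implementation above. `md5_me` is the digest function;
# its name was reconstructed here, so adjust if your copy names it differently:
#
#     import hashlib
#     assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")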
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spanish_id_valid(spanish_id):
    """Validate a Spanish DNI: 8 digits followed by a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
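# Usage sketch (the IDs below are made up for illustration):
#
#     is_spanish_id_valid("12345678Z")   # True: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z"
#     is_spanish_id_valid("12345678A")   # False: checksum letter does not match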
'''simple docstring'''
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
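# Typical entry point for the package above (standard accelerate usage, shown for context):
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)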
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
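# With the lazy module wired up, user-facing imports resolve on first attribute access,
# e.g. (assuming this file lives at transformers/models/xlnet/__init__.py):
#
#     from transformers import XLNetConfig, XLNetModel   # triggers the lazy import above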
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    '''simple docstring'''
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    '''Split fused qkv projections into separate query/key/value entries.

    The target key strings are reconstructed from the model structure used below.'''
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    '''simple docstring'''
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
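# Example invocation (script filename and paths are illustrative):
#
#     python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#         --checkpoint_path /path/to/yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path ./yolos-small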
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()
    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
iter += 1
if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
random.shuffle(rslt_ )
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
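# Example invocation (the script filename is illustrative; paths are the defaults above):
#
#     python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#         --tokenizer_name bert-base-uncased --dump_file data/dump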
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1_024,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
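# Usage sketch (class names as exposed by transformers; the defaults above appear to
# mirror the SCUT-DLVCLab/lilt-roberta-en-base checkpoint):
#
#     from transformers import LiltConfig, LiltModel
#     config = LiltConfig(channel_shrink_ratio=4)
#     model = LiltModel(config)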
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
"""simple docstring"""
        def __init__(self, tokenizer):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
        def serving(self, text):
            '''simple docstring'''
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_tokenizer(self):
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
@slow
    def test_graph_mode(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
    def test_from_config(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
    def test_padding(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run (attribute name reconstructed; a pad token id is
            # required before padded tokenization works)
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
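# The in-graph tokenizer these tests exercise can also be used directly, e.g.:
#
#     tf_tokenizer = TFGPTaTokenizer.from_pretrained("gpt2")
#     outputs = tf_tokenizer(tf.constant(["This is a test"]))   # dict incl. "input_ids"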
"""simple docstring"""
import enum
import shutil
import sys
# Note: `forceWrite` and `reset_cursor` are confirmed by call sites below; the
# remaining helper names are descriptive reconstructions.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
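# Example: write a highlighted line, then erase it (relies on ANSI escape support):
#
#     forceWrite("plain ")
#     writeColor("highlighted", 32)   # 32 = green foreground
#     clear_line()                    # blank the line and return the cursor to column 0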
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    '''simple docstring'''
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    '''simple docstring'''
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
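# Typical invocations (the script path follows the transformers repo layout):
#
#     python utils/check_doc_toc.py                      # fail if the model ToC is unsorted
#     python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place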
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to PyTorch parameters.

    The TF key strings below follow the original MobileNetV1 checkpoint layout
    (reconstructed from memory of the conversion code; verify against an actual
    checkpoint if in doubt)."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to `features` for `conv_layer`."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    def __init__(self, config: MobileNetVaConfig, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False, use_normalization: Optional[bool] = True, use_activation: Optional[bool or str] = True, ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros", )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True, )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2, )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels, ) )

            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, ) )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
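# Inference sketch for the classification head above, via the public transformers API
# (class names as published; `image` is any PIL image):
#
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")
#     predicted_class = model(**inputs).logits.argmax(-1)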
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
# NOTE: the helper names below are descriptive reconstructions; the bodies are the
# original code.
def freeze_params(module):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device


def show_image(image):
    '''simple docstring'''
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir,
    model_name,
    bs=8,
    max_source_length=1024,
    type_path="val",
    n_obs=None,
    fpaa=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs=None,
    prefix="",
    **generate_kwargs,
):
    '''simple docstring'''
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).cuda()
    if fpaa:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = SeqaSeqDataset(
        data_dir, tokenizer, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, )

    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3", )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch")
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all.")
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return")
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fpaa=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for partial_result in partial_results:
records.extend(__magic_name__ )
    UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda x : x["id"] )
UpperCAmelCase : List[Any] = [x["pred"] for x in records]
return preds
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) )
if len(__magic_name__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
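# The script above leans on a few small I/O helpers (save_json, lmap,
# write_txt_file) imported from the example's utils module. A minimal sketch of
# what they might look like; the signatures below are inferred from the call
# sites, so treat them as assumptions rather than the exact library code:
import json

def save_json(content, path, indent=4, **json_dump_kwargs):
    # Serialize `content` (here: a metrics dict or a list of prediction records).
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)

def lmap(f, x):
    # list(map(...)) shorthand, used when loading every rank_*.json shard.
    return list(map(f, x))

def write_txt_file(ordered_lines, path):
    # One generation per line, e.g. {type_path}_generations.txt.
    with open(path, "w") as f:
        f.write("\n".join(ordered_lines))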
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : int ,_UpperCAmelCase : Optional[NestedDataStructureLike[PathLike]] = None ,_UpperCAmelCase : Optional[NamedSplit] = None ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Union[str, Any] ,):
_a : int = path_or_paths
_a : List[Any] = split if split or isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else 'train'
_a : Tuple = features
_a : Optional[Any] = cache_dir
_a : List[str] = keep_in_memory
_a : Tuple = streaming
_a : str = num_proc
_a : int = kwargs
@abstractmethod
def __lowercase ( self : Tuple ):
pass
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[Any] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Optional[int] ,):
_a : Union[str, Any] = features
_a : List[str] = cache_dir
_a : Optional[Any] = keep_in_memory
_a : int = streaming
_a : int = num_proc
_a : List[str] = kwargs
@abstractmethod
def __lowercase ( self : List[Any] ):
pass
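# The two abstract bases above capture the datasets reader pattern: the
# constructor only stores its arguments, and all actual work is deferred to an
# abstract read(). A self-contained sketch of that pattern (the JSON-lines
# reader below is illustrative, not the library's own implementation):
import json
from abc import ABC, abstractmethod

class BaseReader(ABC):
    def __init__(self, path, **kwargs):
        self.path = path
        self.kwargs = kwargs

    @abstractmethod
    def read(self):
        pass

class JsonLinesReader(BaseReader):
    def read(self):
        # One record per non-empty line of a .jsonl file.
        with open(self.path) as f:
            return [json.loads(line) for line in f if line.strip()]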
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = ["model.decoder.embed_positions.weights"]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = list(state_dict.keys() )
UpperCAmelCase : List[Any] = {}
for key in keys:
UpperCAmelCase : Any = state_dict.pop(__magic_name__ )
UpperCAmelCase : str = rename_keys(__magic_name__ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Optional[int] = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : str = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
def lowercase ( __magic_name__ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1024
UpperCAmelCase : Tuple = 24
UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : List[Any] = 1536
UpperCAmelCase : Optional[Any] = 48
UpperCAmelCase : List[str] = 24
elif checkpoint == "large":
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : str = 48
UpperCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
UpperCAmelCase : Tuple = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ )
UpperCAmelCase : Dict = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict(
__magic_name__ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" )
UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__magic_name__ )
if len(__magic_name__ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__magic_name__ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__magic_name__ )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" )
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
# set the appropriate bos/pad token ids
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : Tuple = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__magic_name__ )
processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
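# The conversion above splits Audiocraft's fused attention projection
# ("in_proj_weight", shape (3 * hidden_size, hidden_size)) into separate
# q/k/v matrices by row slicing. A small self-contained check of that slicing
# logic (hidden_size=4 is an arbitrary toy value):
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]
assert q_proj.shape == k_proj.shape == v_proj.shape == (hidden_size, hidden_size)
# Re-stacking the three slices recovers the fused matrix, so the split is lossless.
assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), in_proj_weight)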
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = RobertaConfig
snake_case_ = '''roberta'''
def __init__( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
__lowerCamelCase = RobertaEmbeddings(lowerCamelCase__ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = RobertaConfig
snake_case_ = '''roberta'''
def __init__( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
__lowerCamelCase = config.num_labels
__lowerCamelCase = config.num_hidden_layers
__lowerCamelCase = DeeRobertaModel(lowerCamelCase__ )
__lowerCamelCase = nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=-1 , lowerCamelCase__=False , ) -> str:
'''simple docstring'''
__lowerCamelCase = self.num_layers
try:
__lowerCamelCase = self.roberta(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , position_ids=lowerCamelCase__ , head_mask=lowerCamelCase__ , inputs_embeds=lowerCamelCase__ , )
__lowerCamelCase = outputs[1]
__lowerCamelCase = self.dropout(lowerCamelCase__ )
__lowerCamelCase = self.classifier(lowerCamelCase__ )
__lowerCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCamelCase = e.message
__lowerCamelCase = e.exit_layer
__lowerCamelCase = outputs[0]
if not self.training:
__lowerCamelCase = entropy(lowerCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase = MSELoss()
__lowerCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase = CrossEntropyLoss()
__lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowerCamelCase = []
for highway_exit in outputs[-1]:
__lowerCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(lowerCamelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase = MSELoss()
__lowerCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase = CrossEntropyLoss()
__lowerCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowerCamelCase__ )
if train_highway:
__lowerCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCamelCase = (loss,) + outputs
if not self.training:
__lowerCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
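# The early-exit logic above gates on the entropy of each highway classifier's
# logits: low entropy means a confident intermediate prediction, so inference
# can stop at that layer. The real `entropy` helper is imported from
# modeling_highway_bert; the standalone version below assumes it is plain
# softmax entropy over the last dimension (a sketch, not the exact library code):
import torch

def softmax_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

confident = softmax_entropy(torch.tensor([[10.0, -10.0]]))  # close to 0 nats
uncertain = softmax_entropy(torch.tensor([[0.0, 0.0]]))     # ln(2), about 0.693 nats
assert confident.item() < uncertain.item()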
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
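# What the checks above verify: pad_across_processes zero-pads each rank's
# tensor (at the end, or at the front with pad_first=True) up to the longest
# first dimension across ranks. A single-process sketch of the same padding
# rule with plain torch (pad_to_longest is an illustrative helper, not an
# accelerate API):
import torch

def pad_to_longest(tensors, pad_first=False):
    max_len = max(t.shape[0] for t in tensors)
    padded = []
    for t in tensors:
        pad = torch.zeros(max_len - t.shape[0], *t.shape[1:], dtype=t.dtype)
        padded.append(torch.cat([pad, t] if pad_first else [t, pad], dim=0))
    return padded

t_short, t_long = torch.ones(2, 10), torch.ones(3, 10)
padded_short, padded_long = pad_to_longest([t_short, t_long])
assert padded_short.shape == padded_long.shape == (3, 10)
assert torch.all(padded_short[2:] == 0)  # padded with 0, as the test asserts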
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[float], weight: list[float], capacity: int) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
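# A quick usage check of fractional_knapsack above (toy numbers): the
# value-to-weight ratios are 6.0, 5.0 and 4.0, so items 0 and 1 are taken
# whole and 20/30 of item 2, giving 60 + 100 + 80 = 240.
values, weights = [60.0, 100.0, 120.0], [10.0, 20.0, 30.0]
best_value, taken_fractions = fractional_knapsack(values, weights, capacity=50)
assert best_value == 240.0
assert taken_fractions == [1, 1, 2 / 3]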
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
def euclidean_gcd ( a : int , b : int ):
    while b:
        a , b = b, a % b
    return a

def euclidean_gcd_recursive ( a : int , b : int ):
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )

def main ( ):
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
'''simple docstring'''
def lowercase ( number ):
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
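# Sample conversions for the routine above, checked by hand: the result is
# one bit wider than the binary representation of the magnitude.
assert lowercase(0) == "0b0"
assert lowercase(-1) == "0b11"
assert lowercase(-5) == "0b1011"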
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( electron_conc : float , hole_conc : float , intrinsic_conc : float , ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
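# The function above applies the mass-action law n * p = n_i**2 and solves
# for whichever concentration was passed as zero. Two toy checks:
assert snake_case_(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)
assert snake_case_(electron_conc=0, hole_conc=1600, intrinsic_conc=200) == ("electron_conc", 25.0)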
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
a : int = datasets.load_iris()
a : Union[str, Any] = np.array(data["data"])
a : Optional[Any] = np.array(data["target"])
a : List[Any] = data["target_names"]
a , a , a , a : Dict = train_test_split(X, y)
def euclidean_distance ( a , b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(a ) - np.array(b ) )

def classifier ( train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
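# The classifier above is a plain k-nearest-neighbours vote under Euclidean
# distance. Classifying another point with a smaller neighbourhood; the output
# depends on the random train/test split drawn above:
print(classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3], k=3))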
def solution ( length: int = 50 ):
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
def lowercase ( number ):
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
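# Spot checks for the bit trick above: a power of two has exactly one set
# bit, so n & (n - 1) clears it to zero. Note that 0 also satisfies the
# predicate (0 & -1 == 0), which callers may want to special-case.
assert all(lowercase(2**i) for i in range(10))
assert not any(lowercase(n) for n in (3, 5, 6, 7, 10))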
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _A ( ):
"""simple docstring"""
a__ : Dict =ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=SCREAMING_SNAKE_CASE )
a__ : Optional[Any] =parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=SCREAMING_SNAKE_CASE )
env_command_parser(subparsers=SCREAMING_SNAKE_CASE )
launch_command_parser(subparsers=SCREAMING_SNAKE_CASE )
tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE )
test_command_parser(subparsers=SCREAMING_SNAKE_CASE )
# Let's go
a__ : List[str] =parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
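# The wiring above is argparse's standard subcommand pattern: each
# accelerate.commands module exposes a *_command_parser(subparsers=...) hook
# that registers a subcommand and attaches its handler via
# set_defaults(func=...). A minimal self-contained sketch of the same pattern
# (all names below are illustrative, not accelerate's own):
from argparse import ArgumentParser

def hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello", help="toy subcommand")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))

toy_cli = ArgumentParser("toy", usage="toy <command> [<args>]")
toy_subparsers = toy_cli.add_subparsers(help="toy command helpers")
hello_command_parser(toy_subparsers)
toy_args = toy_cli.parse_args(["hello", "--name", "accelerate"])
toy_args.func(toy_args)  # prints: hello accelerate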
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule ( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs

def unwrap_and_save_reload_schedule ( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
UpperCAmelCase : List[Any] = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : str = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , )
for _ in range(1_0_0_0 ):
UpperCAmelCase : str = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : Optional[int] = 10
def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
UpperCAmelCase : int = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
UpperCAmelCase , UpperCAmelCase : Any = data
UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps )
self.assertListAlmostEqual(
snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule
UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps )
self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = fn
def __call__( self , *snake_case , **snake_case ):
'''simple docstring'''
return self.fn(*snake_case , **snake_case )
@classmethod
def A_ ( self , snake_case ):
'''simple docstring'''
        snake_case.lr_lambdas = list(map(self , snake_case.lr_lambdas ) )
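# The expected-LR table in the test above can be reproduced by hand; e.g. the
# get_linear_schedule_with_warmup row is the base LR of 10.0 scaled by a
# linear warmup over 2 steps followed by linear decay to 0 at step 10. A
# sketch of that multiplicative factor:
def linear_warmup_decay_factor(step, num_warmup_steps=2, num_training_steps=10):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

assert [round(10.0 * linear_warmup_decay_factor(s), 2) for s in range(4)] == [0.0, 5.0, 10.0, 8.75]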
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))

def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark():
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = "T5Config"
def shift_tokens_right ( input_ids , pad_token_id , decoder_start_token_id ):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : Dict = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
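# Behaviour of shift_tokens_right above on a toy batch: labels move one
# position to the right, the decoder start id fills position 0, and any
# ignored (-100) label positions become the pad id after the shift:
labels = jnp.array([[5, -100, 7, 8]])
shifted = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
assert shifted.tolist() == [[2, 5, 0, 7]]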
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = 10
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = [1, 2, 3, 4]
UpperCamelCase__ :Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCamelCase__ :Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCamelCase__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCamelCase__ , UpperCamelCase__ :int = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = ''''''
UpperCamelCase__ , UpperCamelCase__ :List[str] = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCamelCase__ , UpperCamelCase__ :Tuple = process_story(UpperCamelCase_ )
UpperCamelCase__ :Any = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :str = ['''It was the best of times.''']
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = torch.tensor([1, 2, 3, 4] )
UpperCamelCase__ :Optional[int] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCamelCase__ :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCamelCase__ :Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = 101
UpperCamelCase__ :List[str] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCamelCase__ :Tuple = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCamelCase__ :Any = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
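# The truncate_or_pad helper exercised above has a simple contract: clip a
# sequence down to block_size, or right-pad it with pad_token_id. A sketch
# matching the three list test cases (the signature is inferred from the
# tests, so treat it as an assumption rather than the module's exact code):
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]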
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def A_ ( self , snake_case=None , snake_case=None , snake_case=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(snake_case , snake_case )["wer"]
else:
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[Any] = 0
for prediction, reference in zip(snake_case , snake_case ):
UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
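# Hand check of the docstring example above: pair 1 needs one substitution
# (prediction -> reference); pair 2 needs two substitutions plus one
# insertion (an -> another, other -> one, and the extra "sample"), i.e. 3
# edits against 4 + 4 = 8 reference words:
assert (1 + 3) / (4 + 4) == 0.5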
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Tuple = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class snake_case ( PretrainedConfig ):
"""simple docstring"""
snake_case__ = "nllb-moe"
snake_case__ = ["past_key_values"]
snake_case__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self ,vocab_size=128_112 ,max_position_embeddings=1_024 ,encoder_layers=12 ,encoder_ffn_dim=4_096 ,encoder_attention_heads=16 ,decoder_layers=12 ,decoder_ffn_dim=4_096 ,decoder_attention_heads=16 ,encoder_layerdrop=0.0_5 ,decoder_layerdrop=0.0_5 ,use_cache=True ,is_encoder_decoder=True ,activation_function="relu" ,d_model=1_024 ,dropout=0.1 ,attention_dropout=0.1 ,activation_dropout=0.0 ,init_std=0.0_2 ,decoder_start_token_id=2 ,scale_embedding=True ,router_bias=False ,router_dtype="float32" ,router_ignore_padding_tokens=False ,num_experts=128 ,expert_capacity=64 ,encoder_sparse_step=4 ,decoder_sparse_step=4 ,router_z_loss_coef=0.0_0_1 ,router_aux_loss_coef=0.0_0_1 ,second_expert_policy="all" ,normalize_router_prob_before_dropping=False ,batch_prioritized_routing=False ,moe_eval_capacity_token_fraction=1.0 ,moe_token_dropout=0.2 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,output_router_logits=False ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,)
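# Instantiating the configuration class above with its defaults, overriding a
# couple of the MoE-specific fields (the override values are arbitrary examples):
nllb_moe_config = snake_case(num_experts=8, expert_capacity=32, router_dtype="bfloat16")
assert nllb_moe_config.num_experts == 8
assert nllb_moe_config.router_dtype == "bfloat16"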
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors ( n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors

@lru_cache
def upf_len ( num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )

def equality ( iterable ):
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)

def run ( n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run each element through our unique_prime_factors function
        # and append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1

def solution ( n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
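# Classic small cases for the search above: the first pair / triple of
# consecutive integers whose members each have exactly 2 / 3 distinct prime
# factors.
assert run(2) == [14, 15]           # 14 = 2 * 7, 15 = 3 * 5
assert run(3) == [644, 645, 646]    # each has exactly three distinct primes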
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result

def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int], result: list[list[int]], remaining_nums_sum: int, ) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )

nums = [3, 3_4, 4, 1_2, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : Union[str, Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Tuple = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
def _lowerCAmelCase ( *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(UpperCamelCase_ , ["""torch"""] )
def _lowerCAmelCase ( *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(UpperCamelCase_ , ["""torch"""] )
def _lowerCAmelCase ( *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(UpperCamelCase_ , ["""torch"""] )
def _lowerCAmelCase ( *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(UpperCamelCase_ , ["""torch"""] )
def _lowerCAmelCase ( *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(UpperCamelCase_ , ["""torch"""] )
def _lowerCAmelCase ( *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(UpperCamelCase_ , ["""torch"""] )
def _lowerCAmelCase ( *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(UpperCamelCase_ , ["""torch"""] )
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Tuple = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : List[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Tuple = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Tuple = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Union[str, Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
@classmethod
def snake_case_ ( cls , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(cls , ["""torch"""])
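# Note: instantiating any placeholder above without torch installed raises an
# ImportError from `requires_backends` that tells the user to install torch.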
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
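# A minimal usage sketch for the naming helpers above; the expected values
# follow directly from the regexes and f-strings and are illustrative only.
if __name__ == "__main__":
    assert camelcase_to_snakecase("SquadV2") == "squad_v2"
    assert snakecase_to_camelcase("squad_v2") == "SquadV2"
    assert filename_prefix_for_split("squad_v2", "train") == "squad_v2-train"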
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
# NOTE: the function names below are descriptive stand-ins; the originals were anonymized.
def freeze_params(module) -> None:
    """Disable gradient updates for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False
def get_device() -> str:
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device
def show_image(image) -> None:
    """Display an image with both axes hidden."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp() -> str:
    """Return the current time as an HH:MM:SS string."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
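# Illustrative use of the helpers above (the names are descriptive stand-ins
# chosen in this rewrite, not confirmed originals):
#
#   device = get_device()
#   print(f"{get_timestamp()} - running on {device}")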
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Read an edge-list file and build {node: [[neighbour, distance], ...]}."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
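# Sketch of the edge-list format assumed by generate_neighbours: one edge per
# line as "node_a node_b distance", e.g.
#
#   a b 20
#   a c 18
#   b c 10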
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour to use as the starting solution."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of `solution`, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Iteratively improve the tour, keeping a fixed-size tabu list of forbidden swaps."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a 32-bit integer as a little-endian hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Encode the message as a bit string, padded to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes):
    """Split the padded bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT of a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as a 32-character bytes hex string."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476

    # Per-round left-rotation amounts
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_32(aa, a)
        ba = sum_32(ba, b)
        ca = sum_32(ca, c)
        da = sum_32(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
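    # Cross-check the pure-Python implementation against hashlib; md5_me
    # returns the hex digest as bytes, so the hashlib result is encoded.
    import hashlib

    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")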
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n, smallest factor first."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
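    # Illustrative check: 100 = 2 * 2 * 5 * 5
    assert prime_factors(100) == [2, 2, 5, 5]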
'''simple docstring'''
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
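# Minimal usage sketch of the re-exported API (illustrative only; the objects
# below are placeholders for a real model/optimizer/dataloader):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)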
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v( state_dict , config , base_model = False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
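def _demo_qkv_split():
    # Hedged sketch (not part of the original script): a fused qkv projection of
    # shape (3 * hidden_size, hidden_size) splits into query/key/value blocks by
    # row slicing, mirroring read_in_q_k_v above. Shapes here are illustrative.
    hidden_size = 4
    qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = qkv_weight[:hidden_size, :]
    key = qkv_weight[hidden_size : hidden_size * 2, :]
    value = qkv_weight[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)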
def rename_key( name ):
    '''simple docstring'''
    if "backbone" in name:
        name = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        name = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        name = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        name = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        name = name.replace("vit.norm" , "vit.layernorm" )
    return name
def convert_state_dict( orig_state_dict , model ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''simple docstring'''
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="hustvl" )
        model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
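# Example invocation (a sketch; the script filename and checkpoint path are
# placeholders for wherever this converter and the original weights live):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small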
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor( FeatureExtractionMixin ):
    """simple docstring"""
    def __init__( self , **kwargs ):
        requires_backends(self , ["bs4"] )
        super().__init__(**kwargs )
    def xpath_soup( self , element ):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
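    # Worked sketch (not part of the original class; assumes beautifulsoup4 is
    # installed and `extractor` is an instance of this class): for the second
    # <p> in "<html><body><p>a</p><p>b</p></body></html>", xpath_soup yields
    # xpath_tags == ["html", "body", "p"] and xpath_subscripts == [0, 0, 2] --
    # the node is the 2nd <p> among same-named siblings, while <html> and
    # <body> are unique (subscript 0).
    #
    #   soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
    #   tags, subs = extractor.xpath_soup(soup.find_all("p")[1])
    #   assert tags == ["html", "body", "p"] and subs == [0, 0, 2]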
    def get_three_from_single( self , html_string ):
        html_code = BeautifulSoup(html_string , "html.parser" )
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags, xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError("Number of doc strings and xtags does not correspond" )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError("Number of doc strings and xsubs does not correspond" )
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subscripts ):
        xpath = ""
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__( self , html_strings ):
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings )}." )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}" )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
    logger.info("Start encoding" )
    logger.info(f"{len(data )} examples to process." )
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(f"{len(data )} examples processed." )
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f"Dump to {dp_file}" )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
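# Example invocation and readback (a sketch; the script filename and the paths
# are placeholders):
#
#   python binarized_data.py --file_path data/dump.txt \
#       --tokenizer_type bert --tokenizer_name bert-base-uncased \
#       --dump_file data/dump
#
# The resulting dump can be reloaded as a list of integer token-id arrays:
#
#   import pickle
#   with open("data/dump.bert-base-uncased.pickle", "rb") as f:
#       sequences = pickle.load(f)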
def gray_code_sequence( bit_count : int ):
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i], 2 )
    return sequence
def gray_code_sequence_string( bit_count : int ):
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
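def _gray_code_direct(bit_count: int) -> list:
    # Hedged cross-check (not part of the original module): the i-th value of an
    # n-bit Gray code sequence can be computed directly as i ^ (i >> 1), giving
    # an independent reference for gray_code_sequence; e.g.
    # _gray_code_direct(2) == gray_code_sequence(2) == [0, 1, 3, 2].
    return [i ^ (i >> 1) for i in range(1 << bit_count)]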
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
a : Tuple = ["gpt2"]
a : Dict = "gpt2"
if is_tf_available():
    class ModelToSave( tf.Module ):
        """simple docstring"""
        def __init__( self , tokenizer ):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPT2LMHeadModel.from_config(config )

        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
        def serving( self , text ):
            '''simple docstring'''
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )["logits"]
            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest( unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def test_output_equivalence( self ):
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors="tf" )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )

    @slow
    def test_graph_mode( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def test_saved_model( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / "saved.model"
                tf.saved_model.save(model , save_path , signatures={"serving_default": model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures["serving_default"](test_inputs )["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def test_from_config( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def test_padding( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
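# End-to-end usage sketch (not part of the original tests; assumes tensorflow
# and keras-nlp are installed, and downloads the "gpt2" checkpoint on first use):
#
#   import tensorflow as tf
#   from transformers.models.gpt2 import TFGPT2Tokenizer
#
#   tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tokenizer(tf.constant(["An in-graph tokenization example."]))
#   print(batch["input_ids"])  # token ids produced inside the TF graph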
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset( key , offset , original_name , new_name ):
    '''simple docstring'''
    to_find = original_name.split("." )[0]
    key_list = key.split("." )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
    return key
def rename_keys( state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network" ):
            key = key.replace("network" , "poolformer.encoder" )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias" ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj" )]
            key = key.replace(to_replace , f"""patch_embeddings.{total_embed_found}.""" )
            key = key.replace("proj" , "projection" )
            if key.endswith("bias" ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "mlp.fc1" , "output.conv1" )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "mlp.fc2" , "output.conv2" )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "norm1" , "before_norm" )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "norm2" , "after_norm" )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "layer_scale_1" , "layer_scale_1" )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "layer_scale_2" , "layer_scale_2" )
        if "head" in key:
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info(f"""Converting model {model_name}...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
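# Example invocation (a sketch; the script filename and checkpoint path are
# placeholders for wherever this converter and the original weights live):
#
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer-s12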
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def clean_model_doc_toc( model_doc ):
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
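def _demo_clean_model_doc_toc():
    # Toy illustration (not part of the original script): duplicated "local" keys
    # with a single title collapse to one entry, and the result is sorted by title.
    toc = [
        {"local": "model_doc/bert", "title": "BERT"},
        {"local": "model_doc/bert", "title": "BERT"},
        {"local": "model_doc/albert", "title": "ALBERT"},
    ]
    assert clean_model_doc_toc(toc) == [
        {"local": "model_doc/albert", "title": "ALBERT"},
        {"local": "model_doc/bert", "title": "BERT"},
    ]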
def check_model_doc( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig( PretrainedConfig ):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-5

    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 12
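# Usage sketch (not part of the original module): attribute_map aliases
# `hidden_size` to `d_model` and `num_attention_heads` to
# `encoder_attention_heads`, so the defaults line up as follows:
#
#   config = ConditionalDetrConfig()
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.encoder_attention_heads == 8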
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze( module ):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
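def _demo_freeze():
    # Hedged sketch (not part of the original utilities): freezing a module
    # disables gradient tracking on every one of its parameters.
    layer = torch.nn.Linear(2, 2)
    freeze(layer)
    assert all(not p.requires_grad for p in layer.parameters())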
def get_device():
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_image( img ):
    '''simple docstring'''
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp():
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("""glue""" ,"""mrpc""" )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=True ,max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function ,batched=True ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" ,"""labels""" )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples ,padding="""max_length""" ,max_length=128 ,return_tensors="""pt""" )
        return tokenizer.pad(examples ,padding="""longest""" ,return_tensors="""pt""" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] ,shuffle=False ,collate_fn=collate_fn ,batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    model_name = args.model_name_or_path

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator ,batch_size ,model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name ,return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() ,lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer ,num_warmup_steps=0 ,num_training_steps=max_training_steps ,)
    else:
        lr_scheduler = DummyScheduler(optimizer ,total_num_steps=max_training_steps ,warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("""glue""" ,"""mrpc""" )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch ,num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions ,references=references ,)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" ,eval_metric )
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir ,"""all_results.json""" ) ,"""w""" ) as f:
            json.dump(performance_metric ,f )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" ,type=str ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=False ,)
    parser.add_argument(
        """--output_dir""" ,type=str ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,)
    parser.add_argument(
        """--performance_lower_bound""" ,type=float ,default=None ,help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" ,)
    parser.add_argument(
        """--num_epochs""" ,type=int ,default=3 ,help="""Number of train epochs.""" ,)
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config ,args )
if __name__ == "__main__":
main()
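# Example launches (a sketch; the script and config filenames are placeholders):
#
#   accelerate launch --num_processes=2 this_script.py --model_name_or_path bert-base-cased
#
# With a DeepSpeed config that supplies its own optimizer/scheduler, the
# DummyOptim / DummyScheduler branches above take over automatically:
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py --num_epochs 3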
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir,
    model_name,
    bs=8,
    max_source_length=1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs=None,
    prefix="",
    **generate_kwargs,
):
    '''simple docstring'''
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl" , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json" )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams" , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    ds = Seq2SeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({"pred": pred, "id": ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
def run_generate():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
    parser.add_argument("--data_dir" , type=str , help="like cnn_dm/test.source" )
    parser.add_argument(
        "--model_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
    parser.add_argument("--save_dir" , type=str , help="where to save" , default="tmp_gen" )
    parser.add_argument("--max_source_length" , type=int , default=None )
    parser.add_argument(
        "--type_path" , type=str , default="test" , help="which subset to evaluate typically train/val/test" )
    parser.add_argument("--task" , type=str , default="summarization" , help="used for task_specific_params + metrics" )
    parser.add_argument("--bs" , type=int , default=8 , required=False , help="batch size" )
    parser.add_argument(
        "--local_rank" , type=int , default=-1 , required=False , help="should be passed by distributed.launch" )
    parser.add_argument(
        "--n_obs" , type=int , default=None , required=False , help="How many observations. Defaults to all." )
    parser.add_argument(
        "--num_return_sequences" , type=int , default=1 , required=False , help="How many sequences to return" )
    parser.add_argument(
        "--sync_timeout" , type=int , default=600 , required=False , help="How long should master process wait for other processes to finish." , )
    parser.add_argument("--src_lang" , type=str , default=None , required=False )
    parser.add_argument("--tgt_lang" , type=str , default=None , required=False )
    parser.add_argument(
        "--prefix" , type=str , required=False , default=None , help="will be added to the begininng of src examples" )
    parser.add_argument("--fp16" , action="store_true" )
    parser.add_argument("--debug" , action="store_true" )
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}" )
    json_save_dir = Path(args.save_dir + "_tmp" )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json" ) )
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them." )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results, num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json" )
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + ".target" )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds , labels )
        metrics["n_obs"] = len(preds )
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"] , 4 )
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json" )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(f"{args.type_path}_generations.txt" ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(f"{args.type_path}.target" ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results( partial_results ):
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x : x["id"] )
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node( num_replicas , save_dir , timeout ):
    '''simple docstring'''
    start_wait = time.time()
    logger.info("waiting for all nodes to finish" )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json" ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes" )
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
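# Example invocation (a sketch; paths and the script filename are placeholders).
# `--local_rank` is injected by the launcher itself:
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir cnn_dm --save_dir tmp_gen --bs 8 --fp16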
from __future__ import annotations
from random import random
class Node:
    def __init__( self , value = None ) -> None:
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__( self ) -> str:
        '''simple docstring'''
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1 )

    def __str__( self ) -> str:
        '''simple docstring'''
        value = str(self.value ) + " "
        left = str(self.left or '' )
        right = str(self.right or '' )
        return value + left + right
def split( root , value ):
    """simple docstring"""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge( left , right ):
    """simple docstring"""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root , value ):
    """simple docstring"""
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def erase( root , value ):
    """simple docstring"""
    left, first = split(root , value - 1 )
    _, right = split(first , value )
    return merge(left , right )
def inorder( root ) -> None:
"""simple docstring"""
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def interact_treap( root , args ):
    """simple docstring"""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('Unknown command' )
    return root
def main() -> None:
    """simple docstring"""
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
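

# Usage sketch for the treap above (relies on the definitions in this file):
# because the structure is a BST by value and a heap by random priority, an
# inorder traversal prints the inserted values in sorted order, whatever
# priorities were drawn.
def _demo() -> None:
    root = None
    for value in (5, 3, 9, 1, 7):
        root = insert(root, value)
    inorder(root)  # 1,3,5,7,9,
    print()
    root = erase(root, 3)
    inorder(root)  # 1,5,7,9,
    print()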
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size):
    """Rename the fairseq state dict keys to HF conventions and partition it into
    the decoder (LM) state dict and the enc-dec projection state dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
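

# The "in_proj_weight" branch above is the standard fused-QKV split: fairseq
# stores the query/key/value projections stacked along dim 0 of a single
# (3 * hidden_size, hidden_size) matrix, while the HF model expects three
# separate (hidden_size, hidden_size) matrices. A toy-sized sketch (the
# hidden_size of 4 is illustrative, not MusicGen's real one):
def _fused_qkv_split_demo(hidden_size: int = 4) -> None:
    fused = torch.randn(3 * hidden_size, hidden_size)  # stands in for in_proj_weight
    q = fused[:hidden_size, :]
    k = fused[hidden_size : 2 * hidden_size, :]
    v = fused[-hidden_size:, :]
    # concatenating the three slices reproduces the fused matrix exactly
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)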
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name) -> SwinConfig:
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_A = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
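

# For reference, a single-process sketch of the behaviour checked above (an
# illustration, not the accelerate implementation): each rank's tensor is
# padded along dim 0 up to the largest size across ranks, with zeros placed
# after the data by default, or before it when pad_first=True.
def _pad_to_length(t, length, pad_first=False):
    pad = t.new_zeros((length - t.shape[0],) + t.shape[1:])
    return torch.cat([pad, t] if pad_first else [t, pad], dim=0)


assert _pad_to_length(torch.ones(2, 10), 4).shape == (4, 10)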
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
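

# For context, the shim above is the usual deprecation pattern: keep the old
# public name importable, warn on construction, and delegate everything to the
# replacement class. A generic, self-contained sketch with placeholder names
# (_NewProcessor/_OldProcessor are illustrative, not transformers classes):
class _NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class _OldProcessor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldProcessor is deprecated; use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)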
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so the module imports without vision dependencies."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
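

# Outside the test harness, the API exercised above is only a few lines. This
# sketch reuses the tiny random test checkpoint from this file, so scores and
# boxes are meaningless; drop the `model=` argument for the default detector.
if __name__ == "__main__":
    detector = pipeline(
        "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
    )
    detections = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.1,
    )
    for detection in detections:
        print(detection["label"], detection["score"], detection["box"])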
def solution(max_perimeter: int = 10**9) -> int:
    """Return the sum of the perimeters of all almost equilateral Heronian
    triangles (integer sides a, a, a +/- 1 with integral area) whose perimeter
    does not exceed max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Return the two's complement bit pattern of a negative integer, using the
    minimal width: the bits of abs(number) plus one sign bit."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase if use_pascal)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
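

# Usage sketch for the converter above:
print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString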
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
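
    # Cross-check sketch (reuses X_train, y_train and `classes` from above):
    # scikit-learn's own KNeighborsClassifier should agree with the
    # hand-rolled classifier on most query points; ties may break differently.
    from sklearn.neighbors import KNeighborsClassifier

    sk_knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
    print(classes[sk_knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])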
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = BertTokenizer
__UpperCAmelCase : List[str] = BertTokenizerFast
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Optional[Any] = filter_non_english
def __UpperCAmelCase ( self ):
super().setUp()
__a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , _a ):
__a = "UNwant\u00E9d,running"
__a = "unwanted, running"
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file )
__a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = "UNwant\u00E9d,running"
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
__a = self.get_tokenizer(do_lower_case=_a )
__a = self.get_rust_tokenizer(do_lower_case=_a )
__a = "UNwant\u00E9d,running"
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
__a = "a\n'll !!to?'d of, can't."
__a = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__a = {}
for i, token in enumerate(_a ):
__a = i
__a = WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__a = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
__a = tokenizer_r.do_lower_case if hasattr(_a , '''do_lower_case''' ) else False
__a = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __UpperCAmelCase ( self ):
__a = ["的", "人", "有"]
__a = "".join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = True
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
__a = False
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
__a = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (0 is treated as True here)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
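

# Why the bit trick works: a power of two has exactly one set bit, and
# subtracting one flips that bit while setting every lower bit, so the AND is
# zero exactly then (0 is also reported as True by this implementation).
assert [n for n in range(10) if is_power_of_two(n)] == [0, 1, 2, 4, 8]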
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
lowercase = tempfile.mkdtemp()
# fmt: off
lowercase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
lowercase = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ):
"""simple docstring"""
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
lowercase = self.get_image_processor()
lowercase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
lowercase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(__lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(images=__lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = "lower newer"
lowercase = processor(text=__lowerCAmelCase )
lowercase = tokenizer(__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = "lower newer"
lowercase = self.prepare_image_inputs()
lowercase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCAmelCase ):
processor()
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(__lowerCAmelCase )
lowercase = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = "lower newer"
lowercase = self.prepare_image_inputs()
lowercase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler",
            )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
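
    # An added, self-contained illustration (not part of the original suite):
    # the expected values for get_linear_schedule_with_warmup above can be
    # reproduced directly with a dummy parameter and optimizer.
    def test_linear_schedule_standalone_example(self):
        param = nn.Parameter(torch.zeros(1))
        optimizer = AdamW([param], lr=10.0)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
        lrs = unwrap_schedule(scheduler, num_steps=10)
        self.assertListAlmostEqual(lrs, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], tol=1e-2)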
class LambdaScheduleWrapper:
    """Wraps each lr lambda in a picklable callable object."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
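

# A minimal usage sketch (an addition for illustration, not upstream code):
# num_layers is derived from `depths` in __init__, and PretrainedConfig's
# attribute_map routes hidden_size to embed_dim.
def _example_config() -> None:
    config = Swin2SRConfig()
    assert config.num_layers == 6
    assert config.hidden_size == 180  # alias of embed_dim via attribute_map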
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id: int, decoder_start_token_id: int):
    """Shift input ids one token to the right, prepending the decoder start
    token and replacing any -100 label padding with `pad_token_id`."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
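

# Worked example for the shift above (an illustration, not upstream code): the
# decoder input is the target ids shifted one position to the right, with the
# start token prepended and any -100 label padding replaced by the pad id.
def _shift_example() -> None:
    shifted = shift_tokens_right(jnp.array([[5, -100, 7]]), pad_token_id=0, decoder_start_token_id=2)
    assert shifted.tolist() == [[2, 5, 0]]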
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)

def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")

def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work, but currently doesn't (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
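

# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal sketch of how these helpers are typically driven, assuming an
# Accelerator configured with a FullyShardedDataParallelPlugin; `model` and
# `optimizer` below are hypothetical stand-ins.
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()  # fsdp_plugin is read from the environment/config
#   model, optimizer = accelerator.prepare(model, optimizer)
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   # ...later, to resume:
#   load_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt")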
| 214
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
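

# Worked example (added for illustration; not part of the original metric).
# The iterative branch pools raw error counts over the whole corpus before
# dividing, which is not the same as averaging per-sentence WER values.
if __name__ == "__main__":
    measures = compute_measures("this is the reference", "this is the prediction")
    errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total = measures["substitutions"] + measures["deletions"] + measures["hits"]
    assert errors / total == 0.25  # one substitution over four reference words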
| 311
| 0
|
def floyd(n):
    """Print the upper half of the diamond: an increasing triangle of stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: a decreasing triangle of stars."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 170
|
'''simple docstring'''
from functools import lru_cache

def unique_prime_factors(n: int) -> set:
    """Find the set of unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of the given list are equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n unique prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        checker = [upf_len(x) for x in group]
        # Append our target number to the end.
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers with n unique prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
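
if __name__ == "__main__":
    # Worked examples (added for illustration; not part of the original file):
    # 14 = 2 * 7 and 15 = 3 * 5 are the first pair of consecutive integers with
    # two distinct prime factors each; 644, 645, 646 are the first such triple.
    assert solution(2) == 14
    assert solution(3) == 644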
| 311
| 0
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special handling for the labels of pretraining-style model classes
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3

@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
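

# Standalone sketch of the ratio-based tolerance check used above (added for
# illustration; the tensors here are hypothetical). Because activations span
# several orders of magnitude, comparing ratios treats all entries uniformly,
# where an absolute-difference check would be dominated by the largest ones.
if __name__ == "__main__" and is_torch_available():
    expected = torch.tensor([1.0e8, 2.0, -0.5])
    actual = expected * (1 + 5e-4)  # within 0.05% everywhere
    ratio = expected / actual
    assert torch.all(ratio >= 1 - 1e-3) and torch.all(ratio <= 1 + 1e-3)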
| 124
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
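
# Usage note (added for illustration): with the `_LazyModule` pattern the heavy
# torch-backed submodules are only imported when an attribute is first touched,
# so for example
#
#   from transformers.models.encodec import EncodecModel
#
# defers loading `modeling_encodec` (and torch) until this line actually runs.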
| 311
| 0
|
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
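

# --- Usage sketch (added for illustration; not part of the original tests) ---
# A minimal round trip with a throwaway SQLite database; the table name
# "dataset" mirrors the tests above, the file path is hypothetical.
#
#   from datasets import Dataset
#   from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#   SqlDatasetWriter(ds, "dataset", "sqlite:///tmp.db", num_proc=1).write()
#   ds2 = SqlDatasetReader("dataset", "sqlite:///tmp.db").read()
#   assert ds2.column_names == ["col_1", "col_2", "col_3"]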
| 205
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
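

if __name__ == "__main__":
    # Worked examples (added for illustration; not part of the original module):
    assert camelcase_to_snakecase("SomeDatasetName") == "some_dataset_name"
    assert snakecase_to_camelcase("some_dataset_name") == "SomeDatasetName"
    assert filename_prefix_for_split("squad", "train") == "squad-train"
    assert filenames_for_dataset_split(
        "/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
    ) == ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]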
| 311
| 0
|
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    """Leftmost insertion point for `item` that keeps the collection sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    """Rightmost insertion point for `item` that keeps the collection sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    """Iterative binary search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    """Binary search using the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
_A = input("""Enter numbers separated by comma:\n""").strip()
_A = sorted(int(item) for item in user_input.split(""","""))
_A = int(input("""Enter a single number to be found in the list:\n"""))
_A = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 242
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 311
| 0
|
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
| 292
|
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge list into an adjacency dictionary."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours

def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution

def find_neighborhood(solution, dict_of_neighbours):
    """Return all single-swap neighbours of `solution`, each with its total
    distance appended, sorted by that distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        n_index = solution.index(n)
        for kn in solution[1:-1]:
            kn_index = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[n_index] = kn
            _tmp[kn_index] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution

def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Iteratively improve the solution, keeping a bounded tabu list of swaps."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost

def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
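
# Input format sketch (added for illustration; the file below is hypothetical).
# generate_neighbours() expects a whitespace-separated edge list, one edge per
# line ("<node> <node> <distance>"), with single-character node names, since
# the start node is taken from the first character of the file:
#
#   $ cat tests.txt
#   a b 20
#   a c 18
#   b c 10
#   $ python tabu_search.py -f tests.txt -i 4 -s 3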
| 311
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a string of 32 '0'/'1' characters to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append the original length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the padded bit string into 512-bit blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit unsigned integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit unsigned integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32

def md5_me(message: bytes) -> bytes:
    """Return the little-endian hex MD5 digest of `message`."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
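
if __name__ == "__main__":
    # Sanity check (added for illustration): the digest should agree with the
    # standard library implementation.
    import hashlib

    assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8")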
| 311
| 0
|
import re


def split_input(str_):
    """Split on any character that is not a letter, digit, or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text, upper, separator):
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    return to_simple_case(text)


def to_camel_case(text):
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
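
if __name__ == "__main__":
    # Worked examples (added for illustration; not part of the original file):
    assert to_pascal_case("hello world") == "HelloWorld"
    assert to_camel_case("hello world") == "helloWorld"
    assert to_snake_case("Hello World", False) == "hello_world"
    assert to_kebab_case("Hello World", True) == "HELLO-WORLD"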
| 207
|
'''simple docstring'''
a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 311
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int32) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)

                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
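

# --- Usage sketch (added for illustration; not part of the original tests) ---
# Because TFGPT2Tokenizer runs inside the TensorFlow graph, tokenization can be
# exported with the SavedModel and serve raw strings end to end:
#
#   tokenizer = TFGPT2Tokenizer.from_pretrained(TINY_MODEL_CHECKPOINT)
#   model = ModelToSave(tokenizer=tokenizer)
#   logits = model.serving(tf.constant(["Hello there"]))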
| 45
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
    UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase : str = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
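# timm checkpoints store query/key/value as one fused projection per block; the
# HF YOLOS model expects three separate tensors, so the fused matrix and bias are
# sliced into thirds below.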
def read_in_q_k_v(state_dict , config , base_model=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
def rename_key(name ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
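# Walk the original state dict, rename every key to its HF counterpart, and split
# the fused qkv tensors on the way.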
def convert_state_dict(orig_state_dict , model ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        UpperCAmelCase : Optional[int] = orig_state_dict.pop(key )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def prepare_img():
    '''simple docstring'''
    UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    UpperCAmelCase : Tuple = Image.open(requests.get(url , stream=True ).raw )
    return im
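# Main conversion entry point: loads the original weights, remaps them into a
# fresh YolosForObjectDetection, then verifies a forward pass on the COCO test
# image against hard-coded logit/box slices before saving.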
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
    UpperCAmelCase : Tuple = get_yolos_config(yolos_name )
    # load original state_dict
    UpperCAmelCase : int = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # load 🤗 model
    UpperCAmelCase : int = YolosForObjectDetection(config )
    model.eval()
    UpperCAmelCase : Dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
    UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=size )
    UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
    UpperCAmelCase : List[str] = model(**encoding )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="hustvl" )
        model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
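# Example invocation (the script filename and local paths below are illustrative):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small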
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Dict =get_tests_dir("""fixtures/test_sentencepiece.model""")
__lowerCAmelCase : str ={"target_lang": "fi", "source_lang": "en"}
__lowerCAmelCase : Tuple =">>zh<<"
__lowerCAmelCase : int ="Helsinki-NLP/"
if is_torch_available():
__lowerCAmelCase : Any ="pt"
elif is_tf_available():
__lowerCAmelCase : List[str] ="tf"
else:
__lowerCAmelCase : Optional[Any] ="jax"
@require_sentencepiece
class _A ( TokenizerTesterMixin , unittest.TestCase ):
snake_case__ : Dict = MarianTokenizer
snake_case__ : str = False
snake_case__ : Tuple = True
def A__ ( self ):
"""simple docstring"""
super().setUp()
lowercase = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        lowercase = dict(zip(vocab , range(len(vocab ) ) ) )
        lowercase = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
lowercase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def A__ ( self ):
"""simple docstring"""
lowercase = "</s>"
lowercase = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def A__ ( self ):
"""simple docstring"""
lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 9 )
def A__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def A__ ( self ):
"""simple docstring"""
lowercase = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
        lowercase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=None )
        self.assertIsInstance(batch , BatchEncoding )
        lowercase = [38, 121, 14, 697, 3_8848, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        lowercase = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        lowercase = [x.name for x in Path(save_dir ).glob("""*""" )]
        self.assertIn("""source.spm""" , contents )
        MarianTokenizer.from_pretrained(save_dir )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
        lowercase = tok(
            ["""I am a small frog""" * 1000, """I am a small frog"""] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
        lowercase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def A__ ( self ):
"""simple docstring"""
lowercase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
lowercase = "Tämä on testi"
lowercase = "This is a test"
lowercase = [76, 7, 2047, 2]
lowercase = [69, 12, 11, 940, 2]
        lowercase = tokenizer(source_text ).input_ids
        self.assertListEqual(src_ids , expected_src_ids )
        lowercase = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(target_ids , expected_target_ids )
        lowercase = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a : Tuple = logging.getLogger(__name__)
def main():
'''simple docstring'''
UpperCAmelCase : Any = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase : List[Any] = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase : str = fp.readlines()
logger.info("Start encoding" )
logger.info(F"{len(__magic_name__ )} examples to process." )
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
UpperCAmelCase : Union[str, Any] = 1_0000
UpperCAmelCase : Union[str, Any] = time.time()
for text in data:
UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}"
        UpperCAmelCase : Tuple = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
iter += 1
if iter % interval == 0:
UpperCAmelCase : Dict = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCAmelCase : Any = time.time()
logger.info("Finished binarization" )
logger.info(F"{len(__magic_name__ )} examples processed." )
UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCAmelCase : List[str] = tokenizer.vocab_size
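    # Token ids that fit in 16 bits are stored as unsigned 16-bit integers, halving
    # the size of the pickled dump compared to 32-bit ids.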
if vocab_size < (1 << 16):
        UpperCAmelCase : int = [np.uintaa(d ) for d in rslt]
    else:
        UpperCAmelCase : int = [np.intaa(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
import math
import tensorflow as tf
from packaging import version
def _gelu(x ):
    __a = tf.convert_to_tensor(x )
    __a = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
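# Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
# as used by the original GPT/BERT implementations.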
def _gelu_new(x ):
    __a = tf.convert_to_tensor(x )
    __a = tf.cast(math.pi , x.dtype )
    __a = tf.cast(0.04_47_15 , x.dtype )
    __a = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish(x ):
    __a = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
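# gelu_fast folds the constants of the tanh approximation in directly:
# 0.7978845608 is sqrt(2/pi) to ten digits.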
def gelu_fast(x ):
    __a = tf.convert_to_tensor(x )
    __a = tf.cast(0.04_47_15 , x.dtype )
    __a = tf.cast(0.79_78_84_56_08 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def quick_gelu(x ):
    __a = tf.convert_to_tensor(x )
    __a = tf.cast(1.7_02 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa(x ):
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu(x , axis=-1 ):
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
    def approximate_gelu_wrap(x ):
        return tf.keras.activations.gelu(x , approximate=True )

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ = 16
snake_case_ = 32
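# Converts a raw byte count into MiB for the CUDA memory reports below.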
def bamb(x ):
    '''simple docstring'''
    return int(x / 2**20 )
class SCREAMING_SNAKE_CASE__ :
def __enter__( self):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a):
gc.collect()
torch.cuda.empty_cache()
lowercase__ : int = torch.cuda.memory_allocated()
lowercase__ : List[Any] = torch.cuda.max_memory_allocated()
lowercase__ : Optional[int] = bamb(self.end - self.begin)
lowercase__ : Dict = bamb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator , batch_size = 16 , model_name = "bert-base-cased" , n_train = 320 , n_val = 160 , ):
'''simple docstring'''
    lowercase__ : List[Any] = AutoTokenizer.from_pretrained(model_name )
lowercase__ : List[str] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        lowercase__ : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    lowercase__ : List[str] = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
    lowercase__ : int = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    lowercase__ : List[Any] = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
def training_function(config , args ):
'''simple docstring'''
lowercase__ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Union[str, Any] = config["lr"]
lowercase__ : str = int(config['num_epochs'] )
lowercase__ : str = int(config['seed'] )
lowercase__ : Any = int(config['batch_size'] )
lowercase__ : Optional[int] = args.model_name_or_path
    set_seed(seed )
    lowercase__ : str = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
# Instantiate optimizer
lowercase__ : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
    lowercase__ : List[Any] = optimizer_cls(params=model.parameters() , lr=lr )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ : Tuple = 1
    lowercase__ : Union[str, Any] = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lowercase__ : List[Any] = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lowercase__ : Any = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    lowercase__ : str = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# We need to keep track of how many total steps we have iterated over
lowercase__ : List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : List[Any] = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
    for epoch in range(starting_epoch , num_epochs ):
with TorchTracemalloc() as tracemalloc:
model.train()
            for step, batch in enumerate(train_dataloader ):
                lowercase__ : Tuple = model(**batch )
                lowercase__ : Optional[Any] = outputs.loss
                lowercase__ : Optional[int] = loss / gradient_accumulation_steps
                accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : Any = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
            json.dump(train_total_peak_memory , f )
def main():
'''simple docstring'''
lowercase__ : Dict = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
    parser.add_argument(
        '--n_train' , type=int , default=320 , help='Number of training examples to use.' , )
    parser.add_argument(
        '--n_val' , type=int , default=160 , help='Number of validation examples to use.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
lowercase__ : List[Any] = parser.parse_args()
lowercase__ : Dict = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc ):
    '''simple docstring'''
    UpperCAmelCase : Dict = defaultdict(int )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc(overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowercase : Union[str, Any] =logging.get_logger(__name__)
def make_batched(videos ) -> Any:
    """simple docstring"""
    if isinstance(videos , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos , (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''')
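# make_batched normalises the accepted input formats (a single frame, a list of
# frames, or a list of videos) into a list of videos, each a list of frames.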
class snake_case__ (BaseImageProcessor ):
"""simple docstring"""
__lowerCAmelCase :Optional[int] = ["pixel_values"]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> int:
"""simple docstring"""
super().__init__(**__lowercase )
a__ : Any = size if size is not None else {"shortest_edge": 2_5_6}
a__ : Optional[int] = get_size_dict(__lowercase , default_to_square=__lowercase )
a__ : List[Any] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
a__ : Any = get_size_dict(__lowercase , param_name="""crop_size""" )
a__ : Any = do_resize
a__ : Dict = size
a__ : Any = do_center_crop
a__ : int = crop_size
a__ : Union[str, Any] = resample
a__ : Optional[int] = do_rescale
a__ : Dict = rescale_factor
a__ : List[str] = offset
a__ : int = do_normalize
a__ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ) -> Any:
"""simple docstring"""
a__ : int = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" in size:
a__ : str = get_resize_output_image_size(__lowercase , size["""shortest_edge"""] , default_to_square=__lowercase )
elif "height" in size and "width" in size:
a__ : str = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
"""simple docstring"""
a__ : Optional[Any] = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = None , **__lowercase , ) -> List[str]:
"""simple docstring"""
a__ : int = image.astype(np.floataa )
if offset:
a__ : Optional[int] = image - (scale / 2)
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Union[str, Any]:
"""simple docstring"""
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , ) -> Dict:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
a__ : List[str] = to_numpy_array(__lowercase )
if do_resize:
a__ : List[str] = self.resize(image=__lowercase , size=__lowercase , resample=__lowercase )
if do_center_crop:
a__ : str = self.center_crop(__lowercase , size=__lowercase )
if do_rescale:
a__ : Union[str, Any] = self.rescale(image=__lowercase , scale=__lowercase , offset=__lowercase )
if do_normalize:
a__ : Optional[Any] = self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase )
a__ : Optional[int] = to_channel_dimension_format(__lowercase , __lowercase )
return image
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> Any:
"""simple docstring"""
a__ : int = do_resize if do_resize is not None else self.do_resize
a__ : str = resample if resample is not None else self.resample
a__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
a__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ : Tuple = offset if offset is not None else self.offset
a__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
a__ : str = image_mean if image_mean is not None else self.image_mean
a__ : Tuple = image_std if image_std is not None else self.image_std
a__ : str = size if size is not None else self.size
a__ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
a__ : List[Any] = crop_size if crop_size is not None else self.crop_size
a__ : Tuple = get_size_dict(__lowercase , param_name="""crop_size""" )
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
a__ : Optional[Any] = make_batched(__lowercase )
a__ : Optional[int] = [
[
self._preprocess_image(
image=__lowercase , do_resize=__lowercase , size=__lowercase , resample=__lowercase , do_center_crop=__lowercase , crop_size=__lowercase , do_rescale=__lowercase , rescale_factor=__lowercase , offset=__lowercase , do_normalize=__lowercase , image_mean=__lowercase , image_std=__lowercase , data_format=__lowercase , )
for img in video
]
for video in videos
]
a__ : Optional[Any] = {"pixel_values": videos}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module ):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device():
'''simple docstring'''
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def show_pil(img ):
    '''simple docstring'''
    UpperCAmelCase : str = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp():
'''simple docstring'''
UpperCAmelCase : str = datetime.now()
UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
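# The torch-only symbols are appended to _import_structure lazily below, so that
# importing the package stays cheap and still works when torch is not installed.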
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a : str = getLogger(__name__)
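# Each distributed rank evaluates its shard and writes rank_<N>_output.json;
# rank 0 later gathers the per-rank files, combines them, and computes metrics.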
def eval_data_dir(data_dir , save_dir , model_name , bs = 8 , max_source_length = 1024 , type_path="val" , n_obs=None , fpaa=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs = None , prefix="" , **generate_kwargs , ):
'''simple docstring'''
    UpperCAmelCase : List[Any] = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl" , rank=local_rank )
    UpperCAmelCase : List[str] = Path(save_dir )
    UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" )
    torch.cuda.set_device(local_rank )
    UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(model_name ).cuda()
if fpaa:
UpperCAmelCase : int = model.half()
# determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase : Optional[Any] = num_return_sequences
    UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(model_name )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase : Any = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or ""
    UpperCAmelCase : Dict = SeqaSeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
    UpperCAmelCase : int = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    UpperCAmelCase : List[Any] = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    UpperCAmelCase : Any = []
    for batch in tqdm(data_loader ):
        UpperCAmelCase : List[Any] = model.generate(
            input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        UpperCAmelCase : Optional[int] = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        UpperCAmelCase : int = batch["ids"]
        if num_return_sequences > 1:
            UpperCAmelCase : List[Any] = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({"pred": pred, "id": ids[i].item()} )
        save_json(results , save_path )
return results, sampler.num_replicas
def run_generate():
'''simple docstring'''
UpperCAmelCase : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ )
parser.add_argument(
"--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" )
parser.add_argument(
"--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument(
"--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase : Union[str, Any] = time.time()
UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args()
UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase : Optional[Any] = {}
if args.src_lang is not None:
UpperCAmelCase : List[str] = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase : Dict = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    UpperCAmelCase , UpperCAmelCase : str = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
if args.local_rank <= 0:
UpperCAmelCase : List[str] = Path(args.save_dir )
save_dir.mkdir(exist_ok=__magic_name__ )
        UpperCAmelCase : str = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        UpperCAmelCase : Dict = combine_partial_results(partial_results )
if args.num_return_sequences > 1:
UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
            save_json(preds , save_path )
return
UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" )
        with open(tgt_file ) as f:
            UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(preds )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase : Optional[int] = "translation" in args.task
UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge"
        UpperCAmelCase : Dict = score_fn(preds , labels )
        UpperCAmelCase : Any = len(preds )
UpperCAmelCase : Union[str, Any] = time.time() - start_time
UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ )
print(__magic_name__ )
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(__magic_name__ )
def combine_partial_results(partial_results) -> list:
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
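# Note: each rank's intermediate json is assumed to hold records shaped like
# {"id": <int>, "pred": <str>}; sorting by "id" restores the original dataset
# order before the predictions are pulled out.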
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    '''simple docstring'''
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        # Two pointers scan the remainder of the sorted array from both ends.
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
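# Quick sanity check (a sketch using the names fixed above): both solvers
# should agree, e.g.
#   triplet_sum1([2, 7, 4, 0, 9, 5, 1, 3], 18) == (2, 7, 9)
#   triplet_sum2([2, 7, 4, 0, 9, 5, 1, 3], 18) == (2, 7, 9)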
def solution_times() -> tuple[float, float]:
    """simple docstring"""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = ["model.decoder.embed_positions.weights"]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
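# Illustrative example of the mapping above:
# "transformer.layers.0.linear1.weight" -> "model.decoder.layers.0.fc1.weight"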
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = list(state_dict.keys() )
UpperCAmelCase : List[Any] = {}
for key in keys:
UpperCAmelCase : Any = state_dict.pop(__magic_name__ )
UpperCAmelCase : str = rename_keys(__magic_name__ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Optional[int] = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : str = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
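# Note: the fused "in_proj_weight" has shape (3 * hidden_size, hidden_size);
# in the original layout the three equal slices above correspond to the
# q/k/v projection weights.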
def lowercase ( __magic_name__ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1024
UpperCAmelCase : Tuple = 24
UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : List[Any] = 1536
UpperCAmelCase : Optional[Any] = 48
UpperCAmelCase : List[str] = 24
elif checkpoint == "large":
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : str = 48
UpperCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
UpperCAmelCase : Tuple = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ )
UpperCAmelCase : Dict = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict(
__magic_name__ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" )
UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__magic_name__ )
if len(__magic_name__ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__magic_name__ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__magic_name__ )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" )
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
# set the appropriate bos/pad token ids
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : Tuple = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__magic_name__ )
processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowercase_ ( __UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Tuple = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ ( __UpperCAmelCase ) -> str:
lowerCAmelCase__ : List[str] = emb.weight.shape
lowerCAmelCase__ : str = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
lowerCAmelCase__ : int = emb.weight.data
return lin_layer
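# Builds a linear layer whose weight is copied from the embedding matrix
# (a weight-tied, LM-head-style projection; the original passes bias=False).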
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Union[str, Any]:
lowerCAmelCase__ : Tuple = {}
for old_key in state_dict.keys():
lowerCAmelCase__ : Tuple = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCAmelCase__ : Union[str, Any] = key.replace("""moe_layer.experts.0""" , f"""ffn.experts.expert_{expert_idx}""" )
else:
lowerCAmelCase__ : List[str] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
lowerCAmelCase__ : Any = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
lowerCAmelCase__ : List[Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
lowerCAmelCase__ : Optional[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
lowerCAmelCase__ : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
lowerCAmelCase__ : Tuple = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
lowerCAmelCase__ : int = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
lowerCAmelCase__ : Optional[Any] = state_dict[old_key]
return new_dict
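# Illustrative example: with expert_idx=2,
# "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#   -> "decoder.layers.3.ffn.experts.expert_2.fc1.weight"
# (the fc1/fc2 renames only apply to non-expert keys, per the checks above)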
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = WEIGHTS_NAME ) -> Optional[int]:
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Any = 0
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
for expert in range(__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = torch.load(__UpperCAmelCase )["model"]
remove_ignore_keys_(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = rename_fairseq_keys(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = os.path.join(
__UpperCAmelCase , weights_name.replace(""".bin""" , f"""-{len(__UpperCAmelCase )+1:05d}-of-???.bin""" ) )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__UpperCAmelCase )[0]].dtype )
# Add the last block
lowerCAmelCase__ : Optional[int] = os.path.join(__UpperCAmelCase , weights_name.replace(""".bin""" , f"""-{len(__UpperCAmelCase )+1:05d}-of-???.bin""" ) )
lowerCAmelCase__ : Optional[Any] = torch.load(switch_checkpoint_path + """-shared.pt""" )["model"]
remove_ignore_keys_(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = rename_fairseq_keys(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__UpperCAmelCase ) == 1:
lowerCAmelCase__ : List[str] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__UpperCAmelCase , __UpperCAmelCase )
# Otherwise, let's build the index
lowerCAmelCase__ : str = {}
for idx, shard in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-{len(__UpperCAmelCase ):05d}.bin""" )
lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
for key in shard:
lowerCAmelCase__ : int = shard_file
# Add the metadata
lowerCAmelCase__ : List[Any] = {"total_size": total_size}
lowerCAmelCase__ : Optional[int] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , """w""" , encoding="""utf-8""" ) as f:
lowerCAmelCase__ : str = json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + "\n"
f.write(__UpperCAmelCase )
return metadata, index
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_A = parser.parse_args()
_A = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
_A = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
_A = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
a : Union[str, Any] = Accelerator()
a : str = (accelerator.state.process_index + 2, 10)
a : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
a : Optional[int] = ""
a : int = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : str = logging.get_logger(__name__)
_snake_case : Optional[int] = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _UpperCAmelCase ( lowercase__ ):
UpperCamelCase = "sew"
def __init__( self :Optional[Any] , __UpperCamelCase :Dict=32 , __UpperCamelCase :Union[str, Any]=7_68 , __UpperCamelCase :List[str]=12 , __UpperCamelCase :Optional[int]=12 , __UpperCamelCase :List[Any]=30_72 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :List[Any]="gelu" , __UpperCamelCase :Union[str, Any]=0.1 , __UpperCamelCase :Dict=0.1 , __UpperCamelCase :Optional[int]=0.1 , __UpperCamelCase :Any=0.0 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Optional[int]=0.1 , __UpperCamelCase :str=0.02 , __UpperCamelCase :str=1e-5 , __UpperCamelCase :Optional[int]="group" , __UpperCamelCase :Any="gelu" , __UpperCamelCase :int=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , __UpperCamelCase :Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCamelCase :Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCamelCase :int=False , __UpperCamelCase :str=1_28 , __UpperCamelCase :Tuple=16 , __UpperCamelCase :str=True , __UpperCamelCase :Any=0.05 , __UpperCamelCase :Any=10 , __UpperCamelCase :Dict=2 , __UpperCamelCase :Tuple=0.0 , __UpperCamelCase :Any=10 , __UpperCamelCase :Optional[Any]=0 , __UpperCamelCase :List[Any]="mean" , __UpperCamelCase :int=False , __UpperCamelCase :int=False , __UpperCamelCase :Optional[int]=2_56 , __UpperCamelCase :Tuple=0 , __UpperCamelCase :Any=1 , __UpperCamelCase :List[Any]=2 , **__UpperCamelCase :Tuple , ):
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
A = hidden_size
A = feat_extract_norm
A = feat_extract_activation
A = list(__UpperCamelCase )
A = list(__UpperCamelCase )
A = list(__UpperCamelCase )
A = conv_bias
A = num_conv_pos_embeddings
A = num_conv_pos_embedding_groups
A = len(self.conv_dim )
A = num_hidden_layers
A = intermediate_size
A = squeeze_factor
A = hidden_act
A = num_attention_heads
A = hidden_dropout
A = attention_dropout
A = activation_dropout
A = feat_proj_dropout
A = final_dropout
A = layerdrop
A = layer_norm_eps
A = initializer_range
A = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A = apply_spec_augment
A = mask_time_prob
A = mask_time_length
A = mask_time_min_masks
A = mask_feature_prob
A = mask_feature_length
A = mask_feature_min_masks
# ctc loss
A = ctc_loss_reduction
A = ctc_zero_infinity
# sequence classification
A = use_weighted_layer_sum
A = classifier_proj_size
@property
def lowerCamelCase ( self :List[Any] ):
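# Overall downsampling factor of the feature extractor: the product of all
# convolutional strides.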
return functools.reduce(operator.mul , self.conv_stride , 1 )
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = inspect.getfile(accelerate.test_utils )
lowerCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
lowerCamelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
lowerCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def __A ( self ) -> Dict:
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices.' )
lowerCamelCase = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ) -> int:
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices.' )
lowerCamelCase = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(F'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ) -> Optional[int]:
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' )
lowerCamelCase = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = Accelerator()
UpperCAmelCase : str = (accelerator.state.process_index + 2, 10)
UpperCAmelCase : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
UpperCAmelCase : Optional[int] = ""
UpperCAmelCase : int = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCAmelCase : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCAmelCase : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
'''simple docstring'''
def lowercase(number: int) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ : Union[str, Any] = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = ["OwlViTFeatureExtractor"]
A__ : List[Any] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
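# Imports above resolve lazily: attribute access on the module triggers the
# actual import via _LazyModule.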
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
a : int = datasets.load_iris()
a : Union[str, Any] = np.array(data["data"])
a : Optional[Any] = np.array(data["target"])
a : List[Any] = data["target_names"]
a , a , a , a : Dict = train_test_split(X, y)
def euclidean_distance(pointa, pointb):
    '''simple docstring'''
    return np.linalg.norm(np.array(pointa) - np.array(pointb))
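# e.g. euclidean_distance([0, 0], [3, 4]) -> 5.0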
def classifier(train_data, train_target, classes, point, k=5):
    '''simple docstring'''
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Accumulate each octal digit as a decimal digit at position `counter`.
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f'''0o{int(octal)}'''
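# Worked example: 65 -> remainders 1, 0, 1 (65 // 8 = 8, 8 // 8 = 1, 1 // 8 = 0),
# accumulated as decimal digits into 101, formatted as "0o101".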
def main() -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
'''simple docstring'''
def lowercase(number: int) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
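# Bit trick: 8 is 0b1000 and 7 is 0b0111, so 8 & 7 == 0 (a power of two);
# 6 is 0b110 and 5 is 0b101, so 6 & 5 == 0b100 != 0 (not a power of two).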
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def UpperCAmelCase__(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("""Input value must be a positive integer""")
    elif isinstance(number, float):
        raise TypeError("""Input value must be a 'int' type""")
    return bin(number).count("""1""")
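# e.g. bin(25) == "0b11001", which contains three "1"s, so the result is 3.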
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for _ in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : List[str] = []
for step in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" )
torch.save(scheduler.state_dict() , __magic_name__ )
UpperCAmelCase : Any = torch.load(__magic_name__ )
scheduler.load_state_dict(__magic_name__ )
return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
UpperCAmelCase : List[Any] = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : str = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , )
for _ in range(1_0_0_0 ):
UpperCAmelCase : str = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : Optional[int] = 10
def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
UpperCAmelCase : int = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
UpperCAmelCase , UpperCAmelCase : Any = data
UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps )
self.assertListAlmostEqual(
snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule
UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps )
self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = fn
def __call__( self , *snake_case , **snake_case ):
'''simple docstring'''
return self.fn(*snake_case , **snake_case )
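# Wrapping each lr_lambda in this module-level class is what makes the
# schedule picklable (plain lambdas cannot be pickled), which the
# save/reload test above relies on.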
@classmethod
def A_(self, scheduler):
    '''simple docstring'''
    scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas))
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case :Optional[int] = logging.get_logger(__name__)
__snake_case :str = {"vocab_file": "sentencepiece.bpe.model"}
__snake_case :str = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
__snake_case :List[Any] = {
"camembert-base": 512,
}
__snake_case :Dict = "▁"
class _A ( lowercase__ ):
UpperCamelCase__ : Any = VOCAB_FILES_NAMES
UpperCamelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str="<s>" , __SCREAMING_SNAKE_CASE : Optional[int]="</s>" , __SCREAMING_SNAKE_CASE : Optional[int]="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Optional[Any]="<unk>" , __SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , __SCREAMING_SNAKE_CASE : int="<mask>" , __SCREAMING_SNAKE_CASE : List[str]=["<s>NOTUSED", "</s>NOTUSED"] , __SCREAMING_SNAKE_CASE : Tuple = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE))
__a = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
__a = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
__a = len(self.fairseq_tokens_to_ids)
__a = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
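# Pair inputs follow the RoBERTa-style format: <s> A </s></s> B </s>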
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] = None , __SCREAMING_SNAKE_CASE : Union[str, Any] = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE)
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1]
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = []
__a = ""
__a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE) + token
__a = True
__a = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE)
__a = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE)
return out_string.strip()
def __getstate__( self : List[Any]):
'''simple docstring'''
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] = None):
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE)
elif not os.path.isfile(self.vocab_file):
with open(__SCREAMING_SNAKE_CASE , '''wb''') as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = "T5Config"
def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
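# Shifts the decoder inputs one position to the right for teacher forcing:
# decoder_start_token_id fills slot 0 and any -100 label padding is replaced
# by pad_token_id.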
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : Dict = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig