from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return shortest distances from vertex `src` to every vertex; raise on a negative cycle."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
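
# A minimal usage sketch of `bellman_ford`; the edge list below is hypothetical
# and only illustrates the expected input shape (one dict per edge):
#
#     edges = [
#         {"src": 0, "dst": 1, "weight": 4},
#         {"src": 0, "dst": 2, "weight": 5},
#         {"src": 1, "dst": 2, "weight": -3},
#     ]
#     bellman_ford(edges, vertex_count=3, edge_count=3, src=0)
#     # -> [0.0, 4.0, 1.0]
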
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
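
# Behavior sketch: outside of TYPE_CHECKING the module object is swapped for a
# _LazyModule, so a statement such as
#     from transformers import ViTMAEConfig  # resolved lazily via _LazyModule
# only imports the heavy submodules (and their torch/TF dependencies) when the
# symbol is first accessed and the backend is actually available.
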
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download

from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

TOLERANCE = 1e-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
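
# Run sketch: in the transformers repo this file lives under
# tests/models/autoformer/. The @slow integration tests above are skipped by
# default and only execute when RUN_SLOW=1 is set, e.g.
#     RUN_SLOW=1 python -m pytest tests/models/autoformer/test_modeling_autoformer.py
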
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")

import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    """Create train and validation DataLoaders for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin))
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument("--model_name_or_path", type=str, default="bert-base-cased",
                        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
    parser.add_argument("--output_dir", type=str, default=".",
                        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument("--peak_memory_upper_bound", type=float, default=None,
                        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.")
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()

def merge_sort(collection: list) -> list:
    """Pure implementation of the merge sort algorithm in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
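
# Quick sanity checks (illustrative values):
#     merge_sort([0, 5, 3, 2, 2])  # -> [0, 2, 2, 3, 5]
#     merge_sort([])               # -> []
#     merge_sort([-2, -5, -45])    # -> [-45, -5, -2]
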
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")

import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)

        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)

        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
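
# The `patch.object(sys, "argv", args)` pattern above is how the test drives
# the example script's own argparse in-process; a standalone sketch of the
# same idea (argument values illustrative):
#
#     with patch.object(sys, "argv", ["run_glue_deebert.py", "--task_name", "MRPC"]):
#         run_glue_deebert.main()  # the script parses the patched argv
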
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If nothing is changed from the defaults, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)

def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: return the maximum value attainable using the
    items from `index` onward without exceeding `max_weight`.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
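
# Example (illustrative values; with capacity 5 the best choice is the items
# with weights 1 and 4, for a total value of 5 + 8 = 13):
#     knapsack([1, 2, 4, 5], [5, 4, 8, 6], number_of_items=4, max_weight=5, index=0)  # -> 13
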
if __name__ == "__main__":
    import doctest

    doctest.testmod()

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
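
# Usage sketch (the model id is a real checkpoint; `image` is an illustrative
# PIL image; assumes transformers with torch installed):
#
#     from transformers import OwlViTProcessor
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # inputs -> BatchEncoding with input_ids, attention_mask and pixel_values
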
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)

# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa

logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump a decoded copy of the batch to disk."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
            return DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
        else:
            return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument("--max_source_length", default=1024, type=int,
                            help="The maximum total input sequence length after tokenization. Sequences longer "
                                 "than this will be truncated, sequences shorter will be padded.")
        parser.add_argument("--max_target_length", default=56, type=int,
                            help="The maximum total input sequence length after tokenization. Sequences longer "
                                 "than this will be truncated, sequences shorter will be padded.")
        parser.add_argument("--val_max_target_length", default=142, type=int,
                            help="The maximum total input sequence length after tokenization. Sequences longer "
                                 "than this will be truncated, sequences shorter will be padded.")
        parser.add_argument("--test_max_target_length", default=142, type=int,
                            help="The maximum total input sequence length after tokenization. Sequences longer "
                                 "than this will be truncated, sequences shorter will be padded.")
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--task", type=str, default="summarization", required=False, help="# examples. -1 means use all.")
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument("--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None])
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument("--early_stopping_patience", type=int, default=-1, required=False,
                            help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                                 " val_check_interval will effect it.")
        return parser


class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        training_logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        training_logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric, args.save_top_k, lower_is_better),
        early_stopping_callback=es_callback,
        logger=training_logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
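
# Launch sketch (paths and hyper-parameters are illustrative and assume the
# seq2seq example layout with train/val/test files under --data_dir):
#     python finetune.py --data_dir ./cnn_dm --output_dir ./out \
#         --model_name_or_path facebook/bart-base --do_train --gpus 1
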
import math


def proth(number: int) -> int:
    """Calculate the n-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +1 for binary starting at 0, i.e. 2^0, used in calculating the block
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
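
# First few Proth numbers, for a quick sanity check (OEIS A080075):
#     proth(1) -> 3, proth(2) -> 5, proth(3) -> 9, proth(4) -> 13, proth(5) -> 17
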
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")

from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Crawl a public Instagram profile and expose its fields as properties."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")

| 310
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
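# Standalone sketch of the start/end scoring used in the forward pass above;
# shapes and values are made up purely for illustration.
_q = torch.randn(5, 8)        # 5 query tokens, hidden size 8
_s_start = torch.randn(3, 8)  # 3 support start-token embeddings
_p_start = torch.matmul(_q, _s_start.T).sum(1).softmax(0)  # distribution over query tokens
assert _p_start.shape == (5,) and abs(float(_p_start.sum()) - 1.0) < 1e-5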
| 310
| 1
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = torch.nn.Linear(2 , 4 )
__UpperCamelCase = torch.optim.AdamW(model.parameters() , lr=1.0 )
__UpperCamelCase = torch.optim.lr_scheduler.OneCycleLR(_lowercase , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__UpperCamelCase = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__UpperCamelCase = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_lowercase )
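# Illustrative sketch (not part of the test suite): the scalar "signature" above
# is just the L1 norm of the parameters, so reloading random weights almost
# surely changes it, which the > 1e-3 / < 1e-3 assertions below rely on.
_probe = torch.nn.Linear(2, 4)
_sig = (_probe.weight.abs().sum() + _probe.bias.abs().sum()).item()
assert _sig > 0.0  # default init is nonzero with probability 1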
class __lowerCamelCase (_a ):
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(A_ ):
__UpperCamelCase = Accelerator(cpu=A_ )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = Accelerator()
__UpperCamelCase = GradientState()
assert state.num_steps == 1
__UpperCamelCase = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__UpperCamelCase = False
assert state.sync_gradients is False
GradientState._reset_state()
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = create_components()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(A_,A_,A_,A_,A_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = create_components()
accelerator.prepare(A_,A_,A_,A_,A_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*A_: Optional[Any],**A_: str ):
pass
with patch('torch.cuda.set_device',A_ ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
__UpperCamelCase = Accelerator()
self.assertEqual(str(accelerator.state.device ),'cuda:64' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = create_components()
accelerator.prepare(A_,A_,A_,A_,A_ )
__UpperCamelCase = get_signature(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A_ )
# make sure random weights don't match
load_random_weights(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1E-3 )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = create_components()
accelerator.prepare(A_,A_,A_,A_,A_ )
__UpperCamelCase = get_signature(A_ )
# saving hook
def save_config(A_: str,A_: Union[str, Any],A_: int ):
__UpperCamelCase = {'class_name': models[0].__class__.__name__}
with open(os.path.join(A_,'data.json' ),'w' ) as f:
json.dump(A_,A_ )
# loading hook
def load_config(A_: int,A_: List[Any] ):
with open(os.path.join(A_,'data.json' ),'r' ) as f:
__UpperCamelCase = json.load(A_ )
__UpperCamelCase = config['class_name']
__UpperCamelCase = accelerator.register_save_state_pre_hook(A_ )
__UpperCamelCase = accelerator.register_load_state_pre_hook(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A_ )
# make sure random weights don't match with hooks
load_random_weights(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__UpperCamelCase = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(A_ )
# make sure random weights don't match with hooks removed
load_random_weights(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__UpperCamelCase = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(A_ )
self.assertTrue(abs(model_signature - get_signature(A_ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = create_components()
__UpperCamelCase = None
# This should work
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_,A_,A_ )
self.assertTrue(dummy_obj is None )
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = create_components()
__UpperCamelCase = [1, 2, 3]
# This should work
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_,A_,A_ )
self.assertEqual(
getattr(A_,'_is_accelerate_prepared',A_ ),A_,'Dummy object should have `_is_accelerate_prepared` set to `True`',)
self.assertEqual(
getattr(A_,'_is_accelerate_prepared',A_ ),A_,'Model is missing `_is_accelerator_prepared` or is set to `False`',)
self.assertEqual(
getattr(A_,'_is_accelerate_prepared',A_ ),A_,'Optimizer is missing `_is_accelerator_prepared` or is set to `False`',)
self.assertEqual(
getattr(A_,'_is_accelerate_prepared',A_ ),A_,'Scheduler is missing `_is_accelerator_prepared` or is set to `False`',)
self.assertEqual(
getattr(A_,'_is_accelerate_prepared',A_ ),A_,'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`',)
self.assertEqual(
getattr(A_,'_is_accelerate_prepared',A_ ),A_,'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`',)
@slow
@require_bnb
def snake_case_ ( self: Any ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m',load_in_8bit=A_,device_map={'': 0},)
__UpperCamelCase = Accelerator()
# This should work
__UpperCamelCase = accelerator.prepare(A_ )
@slow
@require_bnb
def snake_case_ ( self: Tuple ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__UpperCamelCase = Accelerator()
with init_empty_weights():
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m',)
model.tie_weights()
__UpperCamelCase = infer_auto_device_map(A_ )
__UpperCamelCase = 'cpu'
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m',device_map=A_,load_in_8bit=A_,llm_int8_enable_fp32_cpu_offload=A_ )
# This should not work and get value error
with self.assertRaises(A_ ):
__UpperCamelCase = accelerator.prepare(A_ )
@slow
@require_bnb
@require_multi_gpu
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__UpperCamelCase = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m',)
model.tie_weights()
__UpperCamelCase = infer_auto_device_map(A_ )
__UpperCamelCase = 1
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m',load_in_8bit=A_,device_map=A_,)
__UpperCamelCase = Accelerator()
# This should not work and get value error
with self.assertRaises(A_ ):
__UpperCamelCase = accelerator.prepare(A_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def snake_case_ ( self: Dict ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m',)
__UpperCamelCase = infer_auto_device_map(A_ )
__UpperCamelCase = 1
__UpperCamelCase = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m',load_in_8bit=A_,device_map=A_,)
__UpperCamelCase = Accelerator()
# This should work
__UpperCamelCase = accelerator.prepare(A_ )
@require_cuda
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = torch.nn.Linear(10,10 )
__UpperCamelCase = torch.optim.SGD(model.parameters(),lr=0.0_1 )
__UpperCamelCase = Accelerator(cpu=A_ )
__UpperCamelCase = accelerator.prepare(A_ )
| 310
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = BioGptTokenizer
_lowercase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
__UpperCamelCase = 'lower'
__UpperCamelCase = ['low', 'er</w>']
__UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokens + ['<unk>']
__UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )
@slow
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
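# Hand-worked check of the toy vocabulary written in setUp above: "low" and
# "er</w>" sit at indices 14 and 15, and "<unk>" at 20, matching the ids
# asserted in the tokenizer test. (Standalone illustration only.)
_toy_vocab = {"low": 14, "er</w>": 15, "<unk>": 20}
assert [_toy_vocab[t] for t in ["low", "er</w>", "<unk>"]] == [14, 15, 20]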
| 310
| 1
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __lowerCamelCase :
def __init__( self: Any,A_: Optional[int],A_: Tuple=None,A_: Optional[Any]=None,A_: Any=None,A_: List[Any]="resnet50",A_: Dict=3,A_: Dict=32,A_: Union[str, Any]=3,A_: Optional[Any]=True,A_: str=True,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = out_indices if out_indices is not None else [4]
__UpperCamelCase = stage_names
__UpperCamelCase = out_features
__UpperCamelCase = backbone
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = num_channels
__UpperCamelCase = use_pretrained_backbone
__UpperCamelCase = is_training
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = self.get_config()
return config, pixel_values
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size,num_channels=self.num_channels,out_features=self.out_features,out_indices=self.out_indices,stage_names=self.stage_names,use_pretrained_backbone=self.use_pretrained_backbone,backbone=self.backbone,)
def snake_case_ ( self: Any,A_: List[str],A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TimmBackbone(config=A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(A_ )
self.parent.assertEqual(
result.feature_map[-1].shape,(self.batch_size, model.channels[-1], 14, 14),)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase, __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
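# Illustrative note on the out_indices convention exercised in the tests below:
# timm reports its default as (-1,), the transformers wrapper as the explicit
# positional index; both select the same (last) stage. Stage names are made up.
_stages = ["stem", "stage1", "stage2", "stage3", "stage4"]
assert _stages[-1] == _stages[len(_stages) - 1]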
@require_torch
@require_timm
class __lowerCamelCase (_a , _a , _a , unittest.TestCase ):
_lowercase = (TimmBackbone,) if is_torch_available() else ()
_lowercase = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TimmBackboneModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,has_text_modality=A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = 'resnet18'
__UpperCamelCase = 'microsoft/resnet-18'
__UpperCamelCase = AutoBackbone.from_pretrained(A_,use_timm_backbone=A_ )
__UpperCamelCase = AutoBackbone.from_pretrained(A_ )
self.assertEqual(len(timm_model.out_features ),len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ),len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices,(-1,) )
self.assertEqual(transformers_model.out_indices,[len(timm_model.stage_names ) - 1] )
__UpperCamelCase = AutoBackbone.from_pretrained(A_,use_timm_backbone=A_,out_indices=[1, 2, 3] )
__UpperCamelCase = AutoBackbone.from_pretrained(A_,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ),len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def snake_case_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def snake_case_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def snake_case_ ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def snake_case_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('Safetensors is not supported by timm.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(A_ )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1],A_ )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
__UpperCamelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__UpperCamelCase = self.all_model_classes[0]
__UpperCamelCase = model_class(A_ )
model.to(A_ )
__UpperCamelCase = self._prepare_for_class(A_,A_ )
__UpperCamelCase = model(**A_ )
__UpperCamelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__UpperCamelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__UpperCamelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=A_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(**A_ )
self.assertEqual(len(result.feature_maps ),len(config.out_indices ) )
self.assertEqual(len(model.channels ),len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__UpperCamelCase = copy.deepcopy(A_ )
__UpperCamelCase = None
__UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(**A_ )
self.assertEqual(len(result.feature_maps ),1 )
self.assertEqual(len(model.channels ),1 )
# Check backbone can be initialized with fresh weights
__UpperCamelCase = copy.deepcopy(A_ )
__UpperCamelCase = False
__UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase = model(**A_ )
| 310
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
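# Hypothetical usage sketch (identifier names follow the public transformers API
# described in the docstring above; they are assumptions, not part of this file):
# from transformers import AutoConfig, RagConfig
# q_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# g_cfg = AutoConfig.from_pretrained("facebook/bart-large")
# rag_cfg = RagConfig.from_question_encoder_generator_configs(q_cfg, g_cfg, n_docs=5)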
| 310
| 1
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = None
_lowercase = BloomTokenizerFast
_lowercase = BloomTokenizerFast
_lowercase = True
_lowercase = False
_lowercase = """tokenizer_file"""
_lowercase = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
__UpperCamelCase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self: List[str],**A_: Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.get_rust_tokenizer()
__UpperCamelCase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
__UpperCamelCase = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
__UpperCamelCase = tokenizer.batch_encode_plus(A_ )['input_ids']
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.batch_decode(A_ )
self.assertListEqual(A_,A_ )
def snake_case_ ( self: Dict,A_: Optional[int]=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_,**A_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__UpperCamelCase = 'This is a simple input'
__UpperCamelCase = ['This is a simple input 1', 'This is a simple input 2']
__UpperCamelCase = ('This is a simple input', 'This is a pair')
__UpperCamelCase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(A_,max_length=A_ )
tokenizer_r.encode_plus(A_,max_length=A_ )
tokenizer_r.batch_encode_plus(A_,max_length=A_ )
tokenizer_r.encode(A_,max_length=A_ )
tokenizer_r.batch_encode_plus(A_,max_length=A_ )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
__UpperCamelCase = None # Hotfixing padding = None
self.assertRaises(A_,tokenizer_r.encode,A_,max_length=A_,padding='max_length' )
# Simple input
self.assertRaises(A_,tokenizer_r.encode_plus,A_,max_length=A_,padding='max_length' )
# Simple input
self.assertRaises(
A_,tokenizer_r.batch_encode_plus,A_,max_length=A_,padding='max_length',)
# Pair input
self.assertRaises(A_,tokenizer_r.encode,A_,max_length=A_,padding='max_length' )
# Pair input
self.assertRaises(A_,tokenizer_r.encode_plus,A_,max_length=A_,padding='max_length' )
# Pair input
self.assertRaises(
A_,tokenizer_r.batch_encode_plus,A_,max_length=A_,padding='max_length',)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.get_rust_tokenizer()
__UpperCamelCase = load_dataset('xnli','all_languages',split='test',streaming=A_ )
__UpperCamelCase = next(iter(A_ ) )['premise'] # pick one sample from the stream
__UpperCamelCase = list(sample_data.values() )
__UpperCamelCase = list(map(tokenizer.encode,A_ ) )
__UpperCamelCase = [tokenizer.decode(A_,clean_up_tokenization_spaces=A_ ) for x in output_tokens]
self.assertListEqual(A_,A_ )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ),1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ),1 )
| 310
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
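# Standalone sketch of the attention-masked mean pooling in the forward method
# above (tensor shapes are illustrative):
_embs = torch.randn(2, 4, 8)                        # (batch, seq, hidden)
_mask = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 1]])  # (batch, seq), 0 = padding
_pooled = (_embs * _mask.unsqueeze(2)).sum(dim=1) / _mask.sum(dim=1)[:, None]
assert _pooled.shape == (2, 8)  # one vector per sequence, padding excluded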
| 310
| 1
|
def _A ( ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
__UpperCamelCase = 6
__UpperCamelCase = 1
__UpperCamelCase = 19_01
__UpperCamelCase = 0
while year < 20_01:
day += 7
if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
__UpperCamelCase = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
__UpperCamelCase = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
__UpperCamelCase = day - days_per_month[month - 2]
if month > 12:
year += 1
__UpperCamelCase = 1
if year < 20_01 and day == 1:
sundays += 1
return sundays
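# Independent cross-check using the standard library (illustrative; relies on
# the calendar module rather than the hand-rolled day arithmetic above):
import calendar
_sundays = sum(
    1
    for _y in range(1901, 2001)
    for _m in range(1, 13)
    if calendar.weekday(_y, _m, 1) == calendar.SUNDAY
)
# _sundays should agree with solution() (Project Euler problem 19's answer is 171).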
if __name__ == "__main__":
print(solution())
| 310
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
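# Standalone sketch of the clipped random input ids built in
# prepare_config_and_inputs above (shape and bounds are illustrative):
import tensorflow as _tf
_ids = _tf.clip_by_value(
    _tf.random.uniform((2, 7), maxval=99, dtype=_tf.int32), clip_value_min=0, clip_value_max=3
)
assert int(_tf.reduce_max(_ids)) <= 3  # every id is forced into [0, 3]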
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
| 310
| 1
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__snake_case = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__snake_case = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return float((preds == labels).mean() )
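# Tiny numeric check of the accuracy helper above (arrays are made up):
import numpy as _np
assert float((_np.array([0, 1, 1]) == _np.array([0, 1, 0])).mean()) == 2 / 3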
def _A ( _lowercase , _lowercase , _lowercase="binary" ) -> Any:
"""simple docstring"""
__UpperCamelCase = simple_accuracy(_lowercase , _lowercase )
__UpperCamelCase = float(f1_score(y_true=_lowercase , y_pred=_lowercase , average=_lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = {}
for id_pred, label in zip(_lowercase , _lowercase ):
__UpperCamelCase = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
__UpperCamelCase = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__UpperCamelCase = [(pred, label)]
__UpperCamelCase, __UpperCamelCase = [], []
for question, preds_labels in question_map.items():
__UpperCamelCase, __UpperCamelCase = zip(*_lowercase )
__UpperCamelCase = f1_score(y_true=_lowercase , y_pred=_lowercase , average='macro' )
fas.append(_lowercase )
__UpperCamelCase = int(sum(pred == label for pred, label in preds_labels ) == len(_lowercase ) )
ems.append(_lowercase )
__UpperCamelCase = float(sum(_lowercase ) / len(_lowercase ) )
__UpperCamelCase = sum(_lowercase ) / len(_lowercase )
__UpperCamelCase = float(f1_score(y_true=_lowercase , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase (datasets.Metric ):
def snake_case_ ( self: List[str] ):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(self._get_feature_types() ),codebase_urls=[],reference_urls=[],format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None,)
def snake_case_ ( self: Tuple ):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def snake_case_ ( self: Any,A_: int,A_: str ):
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(A_,A_ )}
elif self.config_name == "cb":
return acc_and_fa(A_,A_,fa_avg='macro' )
elif self.config_name == "record":
__UpperCamelCase = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
__UpperCamelCase = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(A_,A_ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(A_,A_ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(A_,A_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 310
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
| 310
| 1
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: List[str],**A_: List[str] ):
'''simple docstring'''
pass
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__snake_case = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def snake_case_ ( self: int,A_: List[str],A_: Optional[Any],A_: str ):
'''simple docstring'''
__UpperCamelCase = pipeline(
'document-question-answering',model=A_,tokenizer=A_,image_processor=A_ )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = list(zip(*apply_tesseract(load_image(A_ ),A_,'' ) ) )
__UpperCamelCase = 'What is the placebo?'
__UpperCamelCase = [
{
'image': load_image(A_ ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def snake_case_ ( self: Dict,A_: Tuple,A_: Dict ):
'''simple docstring'''
__UpperCamelCase = dqa_pipeline(A_,top_k=2 )
self.assertEqual(
A_,[
[
{'score': ANY(A_ ), 'answer': ANY(A_ ), 'start': ANY(A_ ), 'end': ANY(A_ )},
{'score': ANY(A_ ), 'answer': ANY(A_ ), 'start': ANY(A_ ), 'end': ANY(A_ )},
]
]
* 3,)
@require_torch
@require_detectron2
@require_pytesseract
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = pipeline('document-question-answering',model='hf-internal-testing/tiny-random-layoutlmv2' )
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'How many cats are there?'
__UpperCamelCase = [
{'score': 0.0_0_0_1, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0_0_0_1, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
__UpperCamelCase = dqa_pipeline(image=A_,question=A_,top_k=2 )
self.assertEqual(nested_simplify(A_,decimals=4 ),A_ )
__UpperCamelCase = dqa_pipeline({'image': image, 'question': question},top_k=2 )
self.assertEqual(nested_simplify(A_,decimals=4 ),A_ )
# No text is detected in this image, so layoutlmv2 should fail
# and return an empty answer.
__UpperCamelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
__UpperCamelCase = dqa_pipeline(image=A_,question=A_,top_k=2 )
self.assertEqual(A_,[] )
# We can optionally pass the words and bounding boxes directly
__UpperCamelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = dqa_pipeline(image=A_,question=A_,words=A_,boxes=A_,top_k=2 )
self.assertEqual(A_,[] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = pipeline(
'document-question-answering',model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa',revision='9977165',)
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=A_,question=A_,top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],)
__UpperCamelCase = dqa_pipeline({'image': image, 'question': question},top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],)
__UpperCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}],top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2,)
@slow
@require_torch
@require_detectrona
@require_pytesseract
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = pipeline(
'document-question-answering',model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa',revision='9977165',max_seq_len=50,)
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=A_,question=A_,top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
],)
__UpperCamelCase = dqa_pipeline({'image': image, 'question': question},top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
],)
__UpperCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}],top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2,)
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa',revision='3dc6de3',add_prefix_space=A_ )
__UpperCamelCase = pipeline(
'document-question-answering',model='impira/layoutlm-document-qa',tokenizer=A_,revision='3dc6de3',)
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=A_,question=A_,top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
],)
__UpperCamelCase = dqa_pipeline({'image': image, 'question': question},top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
],)
__UpperCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}],top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2,)
__UpperCamelCase = list(zip(*apply_tesseract(load_image(A_ ),A_,'' ) ) )
# This model should also work if `image` is set to None
__UpperCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question},top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
],)
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa',revision='3dc6de3',add_prefix_space=A_ )
__UpperCamelCase = pipeline(
'document-question-answering',model='impira/layoutlm-document-qa',tokenizer=A_,revision='3dc6de3',max_seq_len=50,)
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=A_,question=A_,top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
],)
__UpperCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}],top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2,)
__UpperCamelCase = list(zip(*apply_tesseract(load_image(A_ ),A_,'' ) ) )
# This model should also work if `image` is set to None
__UpperCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question},top_k=2 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
],)
@slow
@require_torch
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = pipeline(
'document-question-answering',model='naver-clova-ix/donut-base-finetuned-docvqa',tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ),feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa',)
__UpperCamelCase = INVOICE_URL
__UpperCamelCase = 'What is the invoice number?'
__UpperCamelCase = dqa_pipeline(image=A_,question=A_,top_k=2 )
self.assertEqual(nested_simplify(A_,decimals=4 ),[{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
pass
| 310
|
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
    if not isinstance(_lowercase , int ) or _lowercase <= 0:
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
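# Expected output:
# [0, 1, 6, 15, 28]
# [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]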
| 310
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__snake_case = {
'''google/rembert''': 2_5_6,
}
__snake_case = '''▁'''
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = RemBertTokenizer
def __init__( self: Optional[Any],A_: Optional[Any]=None,A_: Tuple=None,A_: Union[str, Any]=True,A_: List[str]=True,A_: Tuple=False,A_: Any="[CLS]",A_: List[Any]="[SEP]",A_: Optional[Any]="<unk>",A_: int="[SEP]",A_: Union[str, Any]="<pad>",A_: List[str]="[CLS]",A_: Union[str, Any]="[MASK]",**A_: str,):
'''simple docstring'''
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else mask_token
super().__init__(
A_,tokenizer_file=A_,do_lower_case=A_,remove_space=A_,keep_accents=A_,bos_token=A_,eos_token=A_,unk_token=A_,sep_token=A_,pad_token=A_,cls_token=A_,mask_token=A_,**A_,)
__UpperCamelCase = do_lower_case
__UpperCamelCase = remove_space
__UpperCamelCase = keep_accents
__UpperCamelCase = vocab_file
__UpperCamelCase = bool(self.vocab_file )
def snake_case_ ( self: int,A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case_ ( self: Optional[Any],A_: List[int],A_: Optional[List[int]] = None,A_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1]
def snake_case_ ( self: Any,A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
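# A single sequence gets all-zero token type ids; for a pair, the first segment and its
# surrounding special tokens are 0 while the second segment plus its final separator is 1.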
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self: Union[str, Any],A_: str,A_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(A_ ) )
return
__UpperCamelCase = os.path.join(
A_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file,A_ )
return (out_vocab_file,)
| 310
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
with open(_lowercase ) as metadata_file:
__UpperCamelCase = json.load(_lowercase )
__UpperCamelCase = LukeConfig(use_entity_aware_attention=_lowercase , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__UpperCamelCase = torch.load(_lowercase , map_location='cpu' )['module']
# Load the entity vocab file
__UpperCamelCase = load_original_entity_vocab(_lowercase )
# add an entry for [MASK2]
__UpperCamelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__UpperCamelCase = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__UpperCamelCase = AddedToken('<ent>' , lstrip=_lowercase , rstrip=_lowercase )
__UpperCamelCase = AddedToken('<ent2>' , lstrip=_lowercase , rstrip=_lowercase )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(_lowercase )
with open(os.path.join(_lowercase , 'tokenizer_config.json' ) , 'r' ) as f:
__UpperCamelCase = json.load(_lowercase )
__UpperCamelCase = 'MLukeTokenizer'
with open(os.path.join(_lowercase , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(_lowercase , _lowercase )
with open(os.path.join(_lowercase , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(_lowercase , _lowercase )
__UpperCamelCase = MLukeTokenizer.from_pretrained(_lowercase )
# Initialize the embeddings of the special tokens
__UpperCamelCase = tokenizer.convert_tokens_to_ids(['@'] )[0]
__UpperCamelCase = tokenizer.convert_tokens_to_ids(['#'] )[0]
__UpperCamelCase = state_dict['embeddings.word_embeddings.weight']
__UpperCamelCase = word_emb[ent_init_index].unsqueeze(0 )
__UpperCamelCase = word_emb[enta_init_index].unsqueeze(0 )
__UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__UpperCamelCase = state_dict[bias_name]
__UpperCamelCase = decoder_bias[ent_init_index].unsqueeze(0 )
__UpperCamelCase = decoder_bias[enta_init_index].unsqueeze(0 )
__UpperCamelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
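# (in LUKE's entity-aware attention, the w2e_query, e2w_query and e2e_query weights are
# all initialized as copies of the standard word-to-word query weights)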
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__UpperCamelCase = f'''encoder.layer.{layer_index}.attention.self.'''
__UpperCamelCase = state_dict[prefix + matrix_name]
__UpperCamelCase = state_dict[prefix + matrix_name]
__UpperCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__UpperCamelCase = state_dict['entity_embeddings.entity_embeddings.weight']
__UpperCamelCase = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
__UpperCamelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__UpperCamelCase = state_dict['entity_predictions.bias']
__UpperCamelCase = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
__UpperCamelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
__UpperCamelCase = LukeForMaskedLM(config=_lowercase ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
__UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
__UpperCamelCase = state_dict[key]
else:
__UpperCamelCase = state_dict[key]
__UpperCamelCase, __UpperCamelCase = model.load_state_dict(_lowercase , strict=_lowercase )
if set(_lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(_lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__UpperCamelCase = MLukeTokenizer.from_pretrained(_lowercase , task='entity_classification' )
__UpperCamelCase = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
__UpperCamelCase = (0, 9)
__UpperCamelCase = tokenizer(_lowercase , entity_spans=[span] , return_tensors='pt' )
__UpperCamelCase = model(**_lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCamelCase = torch.Size((1, 33, 7_68) )
__UpperCamelCase = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCamelCase = torch.Size((1, 1, 7_68) )
__UpperCamelCase = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowercase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__UpperCamelCase = MLukeTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = 'Tokyo is the capital of <mask>.'
__UpperCamelCase = (24, 30)
__UpperCamelCase = tokenizer(_lowercase , entity_spans=[span] , return_tensors='pt' )
__UpperCamelCase = model(**_lowercase )
__UpperCamelCase = encoding['input_ids'][0].tolist()
__UpperCamelCase = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
__UpperCamelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_lowercase )
__UpperCamelCase = outputs.entity_logits[0][0].argmax().item()
__UpperCamelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(_lowercase ) )
model.save_pretrained(_lowercase )
def _A ( _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = ['[MASK]', '[PAD]', '[UNK]']
__UpperCamelCase = [json.loads(_lowercase ) for line in open(_lowercase )]
__UpperCamelCase = {}
for entry in data:
__UpperCamelCase = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__UpperCamelCase = entity_id
break
__UpperCamelCase = f'''{language}:{entity_name}'''
__UpperCamelCase = entity_id
return new_mapping
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__snake_case = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
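# Each highway exit is an intermediate classifier attached to one encoder layer; during
# training, train_highway sums the losses of all exits but the last, while at inference
# DeeRobertaModel signals an early exit via HighwayException once a layer's prediction
# entropy is low enough.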
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 310
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 310
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
| 310
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
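# fairseq -> transformers parameter-name mapping; a '*' in a mapped key is replaced
# with the encoder layer index when the weights are copied over below.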
__snake_case = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
"""simple docstring"""
for attribute in key.split('.' ):
__UpperCamelCase = getattr(_lowercase , _lowercase )
if weight_type is not None:
__UpperCamelCase = getattr(_lowercase , _lowercase ).shape
else:
__UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _A ( _lowercase , _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == 'group' , )
__UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__UpperCamelCase = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(_lowercase )[0].split('.' )[-2]
__UpperCamelCase = mapped_key.replace('*' , _lowercase )
if "weight_g" in name:
__UpperCamelCase = 'weight_g'
elif "weight_v" in name:
__UpperCamelCase = 'weight_v'
elif "weight" in name:
__UpperCamelCase = 'weight'
elif "bias" in name:
__UpperCamelCase = 'bias'
else:
__UpperCamelCase = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = full_name.split('conv_layers.' )[-1]
__UpperCamelCase = name.split('.' )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowercase )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = SEWConfig()
if is_finetuned:
__UpperCamelCase = model.wav_encoder.wav_model.cfg
else:
__UpperCamelCase = model.cfg
__UpperCamelCase = fs_config.conv_bias
__UpperCamelCase = eval(fs_config.conv_feature_layers )
__UpperCamelCase = [x[0] for x in conv_layers]
__UpperCamelCase = [x[1] for x in conv_layers]
__UpperCamelCase = [x[2] for x in conv_layers]
__UpperCamelCase = 'gelu'
__UpperCamelCase = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
__UpperCamelCase = 0.0
__UpperCamelCase = fs_config.activation_fn.name
__UpperCamelCase = fs_config.encoder_embed_dim
__UpperCamelCase = 0.02
__UpperCamelCase = fs_config.encoder_ffn_embed_dim
__UpperCamelCase = 1e-5
__UpperCamelCase = fs_config.encoder_layerdrop
__UpperCamelCase = fs_config.encoder_attention_heads
__UpperCamelCase = fs_config.conv_pos_groups
__UpperCamelCase = fs_config.conv_pos
__UpperCamelCase = len(_lowercase )
__UpperCamelCase = fs_config.encoder_layers
__UpperCamelCase = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__UpperCamelCase = model.cfg
__UpperCamelCase = fs_config.final_dropout
__UpperCamelCase = fs_config.layerdrop
__UpperCamelCase = fs_config.activation_dropout
__UpperCamelCase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__UpperCamelCase = fs_config.attention_dropout
__UpperCamelCase = fs_config.dropout_input
__UpperCamelCase = fs_config.dropout
__UpperCamelCase = fs_config.mask_channel_length
__UpperCamelCase = fs_config.mask_channel_prob
__UpperCamelCase = fs_config.mask_length
__UpperCamelCase = fs_config.mask_prob
__UpperCamelCase = 'Wav2Vec2FeatureExtractor'
__UpperCamelCase = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def _A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True ) -> Optional[Any]:
"""simple docstring"""
if is_finetuned:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__UpperCamelCase = SEWConfig.from_pretrained(_lowercase )
else:
__UpperCamelCase = convert_config(model[0] , _lowercase )
__UpperCamelCase = model[0].eval()
__UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
if is_finetuned:
if dict_path:
__UpperCamelCase = Dictionary.load(_lowercase )
# Important: change the bos & pad token ids, since the CTC blank symbol is <pad>
# and not <s> as in fairseq
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.eos_index
__UpperCamelCase = len(target_dict.symbols )
__UpperCamelCase = os.path.join(_lowercase , 'vocab.json' )
if not os.path.isdir(_lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_lowercase ) )
return
os.makedirs(_lowercase , exist_ok=_lowercase )
with open(_lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _lowercase )
__UpperCamelCase = WavaVecaCTCTokenizer(
_lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_lowercase , )
__UpperCamelCase = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
processor.save_pretrained(_lowercase )
__UpperCamelCase = SEWForCTC(_lowercase )
else:
__UpperCamelCase = SEWModel(_lowercase )
feature_extractor.save_pretrained(_lowercase )
recursively_load_weights(_lowercase , _lowercase , _lowercase )
hf_model.save_pretrained(_lowercase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__snake_case = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 310
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
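# Dynamic axes let the exported ONNX graph accept a variable batch size and sequence
# length (and a variable number of choices for multiple-choice tasks).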
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 310
| 1
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
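# The first argument is the tokenized queries and W_supports the tokenized support
# examples; for each query, the scores of its tokens against the support start/end
# marker embeddings are aggregated into softmax distributions p_starts / p_ends over
# the query tokens.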
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
| 310
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
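# In the upstream implementation the isinstance check below tests for GenerationConfig,
# so nested generation configs are serialized to plain, JSON-friendly dicts.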
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(A_,A_ ):
__UpperCamelCase = v.to_dict()
return d
| 310
| 1
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _A ( _lowercase , _lowercase=1 ) -> str:
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def _A ( _lowercase , _lowercase=0 ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = []
for old_item in old_list:
__UpperCamelCase = old_item.replace('in_layers.0' , 'norm1' )
__UpperCamelCase = new_item.replace('in_layers.2' , 'conv1' )
__UpperCamelCase = new_item.replace('out_layers.0' , 'norm2' )
__UpperCamelCase = new_item.replace('out_layers.3' , 'conv2' )
__UpperCamelCase = new_item.replace('emb_layers.1' , 'time_emb_proj' )
__UpperCamelCase = new_item.replace('skip_connection' , 'conv_shortcut' )
__UpperCamelCase = shave_segments(_lowercase , n_shave_prefix_segments=_lowercase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def _A ( _lowercase , _lowercase=0 ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = []
for old_item in old_list:
__UpperCamelCase = old_item
__UpperCamelCase = new_item.replace('norm.weight' , 'group_norm.weight' )
__UpperCamelCase = new_item.replace('norm.bias' , 'group_norm.bias' )
__UpperCamelCase = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
__UpperCamelCase = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
__UpperCamelCase = shave_segments(_lowercase , n_shave_prefix_segments=_lowercase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def _A ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ) -> Any:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
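# (old checkpoints store q/k/v fused in one qkv tensor; it is reshaped per attention
# head and split into equal query/key/value chunks below)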
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__UpperCamelCase = old_checkpoint[path]
__UpperCamelCase = old_tensor.shape[0] // 3
__UpperCamelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__UpperCamelCase = old_tensor.shape[0] // config['num_head_channels'] // 3
__UpperCamelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = old_tensor.split(channels // num_heads , dim=1 )
__UpperCamelCase = query.reshape(_lowercase )
__UpperCamelCase = key.reshape(_lowercase )
__UpperCamelCase = value.reshape(_lowercase )
for path in paths:
__UpperCamelCase = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__UpperCamelCase = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
__UpperCamelCase = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
__UpperCamelCase = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
__UpperCamelCase = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__UpperCamelCase = old_checkpoint[path['old']][:, :, 0]
else:
__UpperCamelCase = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint, config):
    """
    Takes an LDM state dict and a config, and returns a converted diffusers checkpoint.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]

        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__snake_case = parser.parse_args()
__snake_case = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__snake_case = json.loads(f.read())
__snake_case = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__snake_case = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__snake_case = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__snake_case = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__snake_case = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
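# Usage sketch (not part of the original script): reloading the converted
# pipeline with diffusers. "./ldm_converted" is a hypothetical --dump_path.
#
# from diffusers import LDMPipeline
# pipe = LDMPipeline.from_pretrained("./ldm_converted")
# image = pipe(num_inference_steps=50).images[0]
# image.save("ldm_sample.png")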
| 310
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """
    Check whether the module was compiled with torch.compile().
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """
    Extract a model from its distributed containers.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """
    Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
    """
    PartialState().wait_for_everyone()


def save(obj, f):
    """
    Save the data to disk. Use in place of `torch.save()`.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """
    A context manager that adds each keyword argument to `os.environ` and removes it again on exit.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """
    Gets a pretty name from any object.
    """
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """
    Recursively merges two dictionaries, writing the result into `destination`.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """
    Checks whether a port is already in use on `localhost`.
    """
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
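# Quick usage sketch for the helpers above (the port value is illustrative):
#
# with patch_environment(master_addr="localhost", master_port="29501"):
#     assert os.environ["MASTER_PORT"] == "29501"
# # Outside the `with` block the variables are removed again.
# if is_port_in_use(29501):
#     print("Port 29501 is taken; pick another rendezvous port.")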
| 310
| 1
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """
    Returns the inverse of a 2x2 or 3x3 matrix, or raises ValueError if the
    matrix is singular or has an unsupported size.
    """
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
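# Worked example: for [[4, 7], [2, 6]] the determinant is 4*6 - 2*7 = 10, so
# the inverse is (1/10) * [[6, -7], [-2, 4]]:
#
# >>> inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]])
# [[0.6, -0.7], [-0.2, 0.4]]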
| 310
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
try:
__UpperCamelCase = processors[data_args.task_name]()
__UpperCamelCase = processor.get_labels()
__UpperCamelCase = len(_lowercase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_lowercase , p.label_ids )}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
return results
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 310
| 1
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
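# Usage sketch: IIRFilter.process() (from the audio_filters.iir_filter module
# imported above) filters one float sample at a time, so a block of audio can
# be run through any of these designs like so:
#
# filt = make_lowpass(1000, 48000)  # 1 kHz low-pass at a 48 kHz sample rate
# out = [filt.process(sample) for sample in (0.0, 0.5, 1.0, 0.5, 0.0)]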
| 310
|
import os
def solution() -> int:
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score


if __name__ == "__main__":
    print(solution())
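# Worked example from the Project Euler problem statement: COLIN has the
# alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the
# sorted list it scores 938 * 53 = 49714.
#
# >>> sum(ord(letter) - 64 for letter in "COLIN")
# 53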
| 310
| 1
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowerCamelCase (_a ):
def __init__( self: Optional[Any],*A_: Any,A_: Union[str, Any]=None,A_: Tuple=None,**A_: Dict ):
'''simple docstring'''
super().__init__(*A_,**A_ )
__UpperCamelCase = eval_examples
__UpperCamelCase = post_process_function
def snake_case_ ( self: Optional[int],A_: Union[str, Any]=None,A_: List[str]=None,A_: Optional[Any]=None,A_: str = "eval" ):
'''simple docstring'''
__UpperCamelCase = self.eval_dataset if eval_dataset is None else eval_dataset
__UpperCamelCase = self.get_eval_dataloader(A_ )
__UpperCamelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCamelCase = self.compute_metrics
__UpperCamelCase = None
__UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__UpperCamelCase = time.time()
try:
__UpperCamelCase = eval_loop(
A_,description='Evaluation',prediction_loss_only=True if compute_metrics is None else None,ignore_keys=A_,metric_key_prefix=A_,)
finally:
__UpperCamelCase = compute_metrics
__UpperCamelCase = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A_,A_,num_samples=output.num_samples,num_steps=math.ceil(output.num_samples / total_batch_size ),) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__UpperCamelCase = self.post_process_function(A_,A_,output.predictions )
__UpperCamelCase = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__UpperCamelCase = metrics.pop(A_ )
metrics.update(output.metrics )
else:
__UpperCamelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__UpperCamelCase = self.callback_handler.on_evaluate(self.args,self.state,self.control,A_ )
return metrics
def snake_case_ ( self: int,A_: Optional[Any],A_: int,A_: str=None,A_: str = "test" ):
'''simple docstring'''
__UpperCamelCase = self.get_test_dataloader(A_ )
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCamelCase = self.compute_metrics
__UpperCamelCase = None
__UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__UpperCamelCase = time.time()
try:
__UpperCamelCase = eval_loop(
A_,description='Prediction',prediction_loss_only=True if compute_metrics is None else None,ignore_keys=A_,metric_key_prefix=A_,)
finally:
__UpperCamelCase = compute_metrics
__UpperCamelCase = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A_,A_,num_samples=output.num_samples,num_steps=math.ceil(output.num_samples / total_batch_size ),) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__UpperCamelCase = self.post_process_function(A_,A_,output.predictions,'predict' )
__UpperCamelCase = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__UpperCamelCase = metrics.pop(A_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions,label_ids=predictions.label_ids,metrics=A_ )
| 310
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__snake_case = getLogger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
return list(itertools.chain.from_iterable(_lowercase ) )
def _A ( _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) )
def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowercase ) as f:
return json.load(_lowercase )
def _A ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = git.Repo(search_parent_directories=_lowercase )
__UpperCamelCase = {
'repo_id': str(_lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( _lowercase , _lowercase ) -> List:
"""simple docstring"""
return list(map(_lowercase , _lowercase ) )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
def remove_articles(_lowercase ):
return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
__UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = Counter(_lowercase ) & Counter(_lowercase )
__UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return normalize_answer(_lowercase ) == normalize_answer(_lowercase )
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
assert len(_lowercase ) == len(_lowercase )
__UpperCamelCase = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
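# Doctest-style checks for the SQuAD-style metrics above:
#
# >>> exact_match_score("The Cat!", "the cat")  # articles/punctuation stripped
# True
# >>> round(f1_score("a red apple", "red apple pie"), 3)  # 2 shared tokens
# 0.8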
| 310
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta-xl"""
def __init__( self: str,A_: Union[str, Any]=25_0880,A_: List[Any]=2560,A_: Tuple=36,A_: List[str]=32,A_: str=1_0240,A_: List[Any]="gelu",A_: Dict=0.1,A_: Dict=0.1,A_: Union[str, Any]=514,A_: Optional[int]=1,A_: Any=0.0_2,A_: str=1E-05,A_: str=1,A_: List[Any]=0,A_: Optional[Any]=2,A_: Any="absolute",A_: Tuple=True,A_: Union[str, Any]=None,**A_: Any,):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 310
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def _A ( ) -> str:
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A_: List[Any] ):
nonlocal batch_sizes
batch_sizes.append(A_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(A_,[128, 64, 32, 16, 8] )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A_: int,A_: List[str] ):
nonlocal batch_sizes
batch_sizes.append(A_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCamelCase, __UpperCamelCase = mock_training_loop_function('hello' )
self.assertListEqual(A_,[128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga],[8, 'hello'] )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(A_: int ):
pass
with self.assertRaises(A_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.',cm.exception.args[0] )
def snake_case_ ( self: int ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A_: Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(A_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.',cm.exception.args[0] )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A_: Union[str, Any],A_: List[Any],A_: Tuple ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(A_ ) as cm:
mock_training_loop_function(128,'hello','world' )
self.assertIn('Batch size was passed into `f`',cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')',cm.exception.args[0] )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A_: List[Any] ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(A_ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!',cm.exception.args[0] )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = torch.cuda.memory_allocated()
__UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated(),A_ )
__UpperCamelCase = release_memory(A_ )
self.assertEqual(torch.cuda.memory_allocated(),A_ )
| 310
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 310
| 1
|
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
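# Sanity check for the sample network above (the classic flow network from
# CLRS, whose maximum flow is 23): the saturated edges reported here sum to
# the max-flow value, as the max-flow min-cut theorem predicts for a min cut.
#
# edges = mincut([row[:] for row in test_graph], source=0, sink=5)
# assert sum(test_graph[u][v] for u, v in edges) == 23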
| 310
|
def merge_sort(collection: list) -> list:
    """
    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
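# Complexity note: merge sort performs O(n log n) comparisons in every case
# and uses O(n) auxiliary space; it is stable because ties are taken from
# `left` first (the `<=` in _merge).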
| 310
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__snake_case = random.Random()
def _A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ) -> List[Any]:
"""simple docstring"""
if rng is None:
__UpperCamelCase = global_rng
__UpperCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowerCamelCase (unittest.TestCase ):
def __init__( self: Union[str, Any],A_: Dict,A_: List[Any]=7,A_: Any=400,A_: Tuple=2000,A_: Union[str, Any]=2048,A_: Tuple=128,A_: Optional[Any]=1,A_: Optional[int]=512,A_: Tuple=30,A_: Union[str, Any]=4_4100,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = min_seq_length
__UpperCamelCase = max_seq_length
__UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase = spectrogram_length
__UpperCamelCase = feature_size
__UpperCamelCase = num_audio_channels
__UpperCamelCase = hop_length
__UpperCamelCase = chunk_length
__UpperCamelCase = sampling_rate
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def snake_case_ ( self: Optional[int],A_: Dict=False,A_: Tuple=False ):
'''simple docstring'''
def _flatten(A_: Tuple ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff )
]
if numpify:
__UpperCamelCase = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = TvltFeatureExtractor
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = TvltFeatureExtractionTester(self )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_,'spectrogram_length' ) )
self.assertTrue(hasattr(A_,'feature_size' ) )
self.assertTrue(hasattr(A_,'num_audio_channels' ) )
self.assertTrue(hasattr(A_,'hop_length' ) )
self.assertTrue(hasattr(A_,'chunk_length' ) )
self.assertTrue(hasattr(A_,'sampling_rate' ) )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase = self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase = feat_extract_first.to_dict()
__UpperCamelCase = feat_extract_second.to_dict()
__UpperCamelCase = dict_first.pop('mel_filters' )
__UpperCamelCase = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(A_,A_ ) )
self.assertEqual(A_,A_ )
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = os.path.join(A_,'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase = self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase = feat_extract_first.to_dict()
__UpperCamelCase = feat_extract_second.to_dict()
__UpperCamelCase = dict_first.pop('mel_filters' )
__UpperCamelCase = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(A_,A_ ) )
self.assertEqual(A_,A_ )
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase = [floats_list((1, x) )[0] for x in range(800,1400,200 )]
__UpperCamelCase = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test not batched input
__UpperCamelCase = feature_extractor(np_speech_inputs[0],return_tensors='np',sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__UpperCamelCase = feature_extractor(A_,return_tensors='np',sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__UpperCamelCase = feature_extractor(
A_,return_tensors='np',sampling_rate=4_4100,mask_audio=A_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__UpperCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase = np.asarray(A_ )
__UpperCamelCase = feature_extractor(A_,return_tensors='np',sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy','clean',split='validation' )
# automatic decoding with librispeech
__UpperCamelCase = ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self._load_datasamples(1 )
__UpperCamelCase = TvltFeatureExtractor()
__UpperCamelCase = feature_extractor(A_,return_tensors='pt' ).audio_values
self.assertEqual(audio_values.shape,(1, 1, 192, 128) )
__UpperCamelCase = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2],A_,atol=1E-4 ) )
| 310
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
self.assertDictEqual(MockClass(a=2,b=A_ ).to_kwargs(),{'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 )
AcceleratorState._reset_state()
__UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
self.assertEqual(scaler._enabled,A_ )
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
__snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
__snake_case = torch.nn.Linear(1_0_0, 2_0_0)
__snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
__snake_case = ''''''
__snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310
| 1
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__snake_case = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__snake_case = f"""https://www.google.com/search?q={query}&num=100"""
__snake_case = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
__snake_case = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
__snake_case = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 310
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """OwlViTImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
| 310
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__snake_case = 3
def _A ( _lowercase ) -> int:
"""simple docstring"""
print('Generating primitive root of p' )
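# Screening sketch (a reading of the loop below, not a complete primitive-root
# test): a candidate g drawn from [3, p) is rejected when g**2 == 1 (mod p) or
# g**p == 1 (mod p); the first survivor is returned.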
while True:
__UpperCamelCase = random.randrange(3 , _lowercase )
if pow(_lowercase , 2 , _lowercase ) == 1:
continue
if pow(_lowercase , _lowercase , _lowercase ) == 1:
continue
return g
def _A ( _lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
"""simple docstring"""
print('Generating prime p...' )
__UpperCamelCase = rabin_miller.generate_large_prime(_lowercase ) # select large prime number.
__UpperCamelCase = primitive_root(_lowercase ) # one primitive root on modulo p.
__UpperCamelCase = random.randrange(3 , _lowercase ) # private_key -> have to be greater than 2 for safety.
__UpperCamelCase = cryptomath.find_mod_inverse(pow(_lowercase , _lowercase , _lowercase ) , _lowercase )
__UpperCamelCase = (key_size, e_a, e_a, p)
__UpperCamelCase = (key_size, d)
return public_key, private_key
def _A ( _lowercase , _lowercase ) -> None:
"""simple docstring"""
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print('\nWARNING:' )
print(
f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__UpperCamelCase, __UpperCamelCase = generate_key(_lowercase )
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' , 'w' ) as fo:
fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' , 'w' ) as fo:
fo.write(f'''{private_key[0]},{private_key[1]}''' )
def _A ( ) -> None:
"""simple docstring"""
print('Making key files...' )
make_key_files('elgamal' , 20_48 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
| 310
|
import math
def _A ( _lowercase ) -> int:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
__UpperCamelCase = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , _lowercase ):
for _ in range(_lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
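# Worked example (hand-checked against the block construction above): the list
# grows as 3, 5, 9, 13, 17, ..., so proth(1) -> 3, proth(3) -> 9, proth(5) -> 17.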
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case = 0
try:
__snake_case = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
def _A ( _lowercase ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_lowercase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_lowercase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_lowercase ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class __lowerCamelCase (_a ):
_lowercase = ["""pixel_values"""]
def __init__( self: Optional[Any],A_: bool = True,A_: Dict[str, int] = None,A_: PILImageResampling = PILImageResampling.BILINEAR,A_: bool = True,A_: Dict[str, int] = None,A_: bool = True,A_: Union[int, float] = 1 / 255,A_: bool = True,A_: bool = True,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,**A_: Union[str, Any],):
'''simple docstring'''
super().__init__(**A_ )
__UpperCamelCase = size if size is not None else {'shortest_edge': 256}
__UpperCamelCase = get_size_dict(A_,default_to_square=A_ )
__UpperCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__UpperCamelCase = get_size_dict(A_,param_name='crop_size' )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = resample
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = offset
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self: Dict,A_: np.ndarray,A_: Dict[str, int],A_: PILImageResampling = PILImageResampling.BILINEAR,A_: Optional[Union[str, ChannelDimension]] = None,**A_: int,):
'''simple docstring'''
__UpperCamelCase = get_size_dict(A_,default_to_square=A_ )
if "shortest_edge" in size:
__UpperCamelCase = get_resize_output_image_size(A_,size['shortest_edge'],default_to_square=A_ )
elif "height" in size and "width" in size:
__UpperCamelCase = (size['height'], size['width'])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(A_,size=A_,resample=A_,data_format=A_,**A_ )
def snake_case_ ( self: Tuple,A_: np.ndarray,A_: Dict[str, int],A_: Optional[Union[str, ChannelDimension]] = None,**A_: Tuple,):
'''simple docstring'''
__UpperCamelCase = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(A_,size=(size['height'], size['width']),data_format=A_,**A_ )
def snake_case_ ( self: List[str],A_: np.ndarray,A_: Union[int, float],A_: bool = True,A_: Optional[Union[str, ChannelDimension]] = None,**A_: Optional[Any],):
'''simple docstring'''
__UpperCamelCase = image.astype(np.floataa )
if offset:
__UpperCamelCase = image - (scale / 2)
return rescale(A_,scale=A_,data_format=A_,**A_ )
def snake_case_ ( self: List[str],A_: np.ndarray,A_: Union[float, List[float]],A_: Union[float, List[float]],A_: Optional[Union[str, ChannelDimension]] = None,**A_: List[str],):
'''simple docstring'''
return normalize(A_,mean=A_,std=A_,data_format=A_,**A_ )
def snake_case_ ( self: Union[str, Any],A_: ImageInput,A_: bool = None,A_: Dict[str, int] = None,A_: PILImageResampling = None,A_: bool = None,A_: Dict[str, int] = None,A_: bool = None,A_: float = None,A_: bool = None,A_: bool = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[ChannelDimension] = ChannelDimension.FIRST,):
'''simple docstring'''
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = to_numpy_array(A_ )
if do_resize:
__UpperCamelCase = self.resize(image=A_,size=A_,resample=A_ )
if do_center_crop:
__UpperCamelCase = self.center_crop(A_,size=A_ )
if do_rescale:
__UpperCamelCase = self.rescale(image=A_,scale=A_,offset=A_ )
if do_normalize:
__UpperCamelCase = self.normalize(image=A_,mean=A_,std=A_ )
__UpperCamelCase = to_channel_dimension_format(A_,A_ )
return image
def snake_case_ ( self: List[str],A_: ImageInput,A_: bool = None,A_: Dict[str, int] = None,A_: PILImageResampling = None,A_: bool = None,A_: Dict[str, int] = None,A_: bool = None,A_: float = None,A_: bool = None,A_: bool = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[str, TensorType]] = None,A_: ChannelDimension = ChannelDimension.FIRST,**A_: List[Any],):
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = offset if offset is not None else self.offset
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(A_,default_to_square=A_ )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(A_,param_name='crop_size' )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
__UpperCamelCase = make_batched(A_ )
__UpperCamelCase = [
[
self._preprocess_image(
image=A_,do_resize=A_,size=A_,resample=A_,do_center_crop=A_,crop_size=A_,do_rescale=A_,rescale_factor=A_,offset=A_,do_normalize=A_,image_mean=A_,image_std=A_,data_format=A_,)
for img in video
]
for video in videos
]
__UpperCamelCase = {'pixel_values': videos}
return BatchFeature(data=A_,tensor_type=A_ )
| 310
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
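# W_supports carries per-example support sizes plus the ids of the special
# entity start/end markers; these bookkeeping entries are deleted below so that
# only genuine model inputs reach BERT.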
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
| 310
| 1
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
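# Early-exit sketch: an intermediate highway head raises HighwayException once
# its exit criterion is met, and the except branch below recovers the partial
# outputs and exit layer that the exception carries.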
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 310
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = BioGptTokenizer
_lowercase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
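# Each merge rule above reads "<left> <right> <count>", and the `</w>` suffix
# marks end-of-word tokens (a reading of the fixture, mirroring fastBPE-style
# merge files).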
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
__UpperCamelCase = 'lower'
__UpperCamelCase = ['low', 'er</w>']
__UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokens + ['<unk>']
__UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )
@slow
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 310
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """Pix2StructImageProcessor"""
_lowercase = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self: Union[str, Any],A_: Optional[Any],A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = False
super().__init__(A_,A_ )
def __call__( self: Optional[Any],A_: List[Any]=None,A_: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,A_: bool = True,A_: Union[bool, str, PaddingStrategy] = False,A_: Union[bool, str, TruncationStrategy] = None,A_: Optional[int] = None,A_: Optional[int] = 2048,A_: int = 0,A_: Optional[int] = None,A_: Optional[bool] = None,A_: bool = False,A_: bool = False,A_: bool = False,A_: bool = False,A_: bool = False,A_: bool = True,A_: Optional[Union[str, TensorType]] = None,**A_: Any,):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
__UpperCamelCase = self.tokenizer
__UpperCamelCase = self.tokenizer(
text=A_,add_special_tokens=A_,padding=A_,truncation=A_,max_length=A_,stride=A_,pad_to_multiple_of=A_,return_attention_mask=A_,return_overflowing_tokens=A_,return_special_tokens_mask=A_,return_offsets_mapping=A_,return_token_type_ids=A_,return_length=A_,verbose=A_,return_tensors=A_,**A_,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,max_patches=A_,**A_ )
else:
# add pixel_values and bbox
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,max_patches=A_,header_text=A_,**A_ )
if text is not None and not self.image_processor.is_vqa:
__UpperCamelCase = self.tokenizer(
text=A_,add_special_tokens=A_,padding=A_,truncation=A_,max_length=A_,stride=A_,pad_to_multiple_of=A_,return_attention_mask=A_,return_overflowing_tokens=A_,return_special_tokens_mask=A_,return_offsets_mapping=A_,return_token_type_ids=A_,return_length=A_,verbose=A_,return_tensors=A_,**A_,)
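# In the upstream Pix2Struct processor the popped tensors below are re-added
# under decoder_attention_mask / decoder_input_ids, since the tokenized prompt
# feeds the decoder side of the model.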
if "attention_mask" in text_encoding:
__UpperCamelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
__UpperCamelCase = text_encoding.pop('input_ids' )
else:
__UpperCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(A_ )
return encoding_image_processor
def snake_case_ ( self: Any,*A_: int,**A_: Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: Union[str, Any],**A_: int ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.tokenizer.model_input_names
__UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 310
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`].
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with a distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
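# Minimal construction sketch (commented out; identifiers follow the upstream
# transformers API rather than the renamed symbols in this file):
# from transformers import AutoConfig, RagConfig
# q_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# g_cfg = AutoConfig.from_pretrained("facebook/bart-large")
# cfg = RagConfig.from_question_encoder_generator_configs(q_cfg, g_cfg, n_docs=5)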
| 310
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 42
class __lowerCamelCase (nn.Module ):
def __init__( self: List[str],A_: Dict=3,A_: Union[str, Any]=3,A_: Tuple=("DownEncoderBlock2D",),A_: str=(64,),A_: Tuple=2,A_: str=32,A_: Tuple="silu",A_: Any=True,):
'''simple docstring'''
super().__init__()
__UpperCamelCase = layers_per_block
__UpperCamelCase = torch.nn.Convad(
A_,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__UpperCamelCase = None
__UpperCamelCase = nn.ModuleList([] )
# down
__UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
__UpperCamelCase = output_channel
__UpperCamelCase = block_out_channels[i]
__UpperCamelCase = i == len(A_ ) - 1
__UpperCamelCase = get_down_block(
A_,num_layers=self.layers_per_block,in_channels=A_,out_channels=A_,add_downsample=not is_final_block,resnet_eps=1E-6,downsample_padding=0,resnet_act_fn=A_,resnet_groups=A_,attention_head_dim=A_,temb_channels=A_,)
self.down_blocks.append(A_ )
# mid
__UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1E-6,resnet_act_fn=A_,output_scale_factor=1,resnet_time_scale_shift='default',attention_head_dim=block_out_channels[-1],resnet_groups=A_,temb_channels=A_,)
# out
__UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=A_,eps=1E-6 )
__UpperCamelCase = nn.SiLU()
__UpperCamelCase = 2 * out_channels if double_z else out_channels
__UpperCamelCase = nn.Convad(block_out_channels[-1],A_,3,padding=1 )
__UpperCamelCase = False
def snake_case_ ( self: Tuple,A_: str ):
'''simple docstring'''
__UpperCamelCase = x
__UpperCamelCase = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_: List[Any] ):
def custom_forward(*A_: Optional[Any] ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=','1.11.0' ):
for down_block in self.down_blocks:
__UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ),A_,use_reentrant=A_ )
# middle
__UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),A_,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
__UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ),A_ )
# middle
__UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),A_ )
else:
# down
for down_block in self.down_blocks:
__UpperCamelCase = down_block(A_ )
# middle
__UpperCamelCase = self.mid_block(A_ )
# post-process
__UpperCamelCase = self.conv_norm_out(A_ )
__UpperCamelCase = self.conv_act(A_ )
__UpperCamelCase = self.conv_out(A_ )
return sample
class __lowerCamelCase (nn.Module ):
def __init__( self: str,A_: Any=3,A_: Optional[Any]=3,A_: str=("UpDecoderBlock2D",),A_: Optional[Any]=(64,),A_: Dict=2,A_: Any=32,A_: Dict="silu",A_: Dict="group",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = layers_per_block
__UpperCamelCase = nn.Convad(
A_,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__UpperCamelCase = None
__UpperCamelCase = nn.ModuleList([] )
__UpperCamelCase = in_channels if norm_type == 'spatial' else None
# mid
__UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1E-6,resnet_act_fn=A_,output_scale_factor=1,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=A_,temb_channels=A_,)
# up
__UpperCamelCase = list(reversed(A_ ) )
__UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
__UpperCamelCase = output_channel
__UpperCamelCase = reversed_block_out_channels[i]
__UpperCamelCase = i == len(A_ ) - 1
__UpperCamelCase = get_up_block(
A_,num_layers=self.layers_per_block + 1,in_channels=A_,out_channels=A_,prev_output_channel=A_,add_upsample=not is_final_block,resnet_eps=1E-6,resnet_act_fn=A_,resnet_groups=A_,attention_head_dim=A_,temb_channels=A_,resnet_time_scale_shift=A_,)
self.up_blocks.append(A_ )
__UpperCamelCase = output_channel
# out
if norm_type == "spatial":
__UpperCamelCase = SpatialNorm(block_out_channels[0],A_ )
else:
__UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=A_,eps=1E-6 )
__UpperCamelCase = nn.SiLU()
__UpperCamelCase = nn.Convad(block_out_channels[0],A_,3,padding=1 )
__UpperCamelCase = False
def snake_case_ ( self: Tuple,A_: Optional[Any],A_: List[Any]=None ):
'''simple docstring'''
__UpperCamelCase = z
__UpperCamelCase = self.conv_in(A_ )
__UpperCamelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_: Tuple ):
def custom_forward(*A_: Dict ):
return module(*A_ )
return custom_forward
if is_torch_version('>=','1.11.0' ):
# middle
__UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),A_,A_,use_reentrant=A_ )
__UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
__UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ),A_,A_,use_reentrant=A_ )
else:
# middle
__UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),A_,A_ )
__UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
__UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ),A_,A_ )
else:
# middle
__UpperCamelCase = self.mid_block(A_,A_ )
__UpperCamelCase = sample.to(A_ )
# up
for up_block in self.up_blocks:
__UpperCamelCase = up_block(A_,A_ )
# post-process
if latent_embeds is None:
__UpperCamelCase = self.conv_norm_out(A_ )
else:
__UpperCamelCase = self.conv_norm_out(A_,A_ )
__UpperCamelCase = self.conv_act(A_ )
__UpperCamelCase = self.conv_out(A_ )
return sample
class __lowerCamelCase (nn.Module ):
def __init__( self: Optional[int],A_: Tuple,A_: Union[str, Any],A_: Optional[int],A_: Tuple=None,A_: Any="random",A_: str=False,A_: Optional[Any]=True ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = n_e
__UpperCamelCase = vq_embed_dim
__UpperCamelCase = beta
__UpperCamelCase = legacy
__UpperCamelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__UpperCamelCase = remap
if self.remap is not None:
self.register_buffer('used',torch.tensor(np.load(self.remap ) ) )
__UpperCamelCase = self.used.shape[0]
__UpperCamelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__UpperCamelCase = self.re_embed
__UpperCamelCase = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
__UpperCamelCase = n_e
__UpperCamelCase = sane_index_shape
def snake_case_ ( self: Any,A_: str ):
'''simple docstring'''
__UpperCamelCase = inds.shape
assert len(A_ ) > 1
__UpperCamelCase = inds.reshape(ishape[0],-1 )
__UpperCamelCase = self.used.to(A_ )
__UpperCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
__UpperCamelCase = match.argmax(-1 )
__UpperCamelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__UpperCamelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__UpperCamelCase = self.unknown_index
return new.reshape(A_ )
def snake_case_ ( self: str,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = inds.shape
assert len(A_ ) > 1
__UpperCamelCase = inds.reshape(ishape[0],-1 )
__UpperCamelCase = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
__UpperCamelCase = 0 # simply set to zero
__UpperCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,A_ )
return back.reshape(A_ )
def snake_case_ ( self: int,A_: Tuple ):
'''simple docstring'''
__UpperCamelCase = z.permute(0,2,3,1 ).contiguous()
__UpperCamelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__UpperCamelCase = torch.argmin(torch.cdist(A_,self.embedding.weight ),dim=1 )
__UpperCamelCase = self.embedding(A_ ).view(z.shape )
__UpperCamelCase = None
__UpperCamelCase = None
# compute loss for embedding
if not self.legacy:
__UpperCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__UpperCamelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
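# (straight-through estimator: the forward pass uses the quantized z_q while
# gradients flow back to z as if quantization were the identity map)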
__UpperCamelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__UpperCamelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__UpperCamelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__UpperCamelCase = self.remap_to_used(A_ )
__UpperCamelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__UpperCamelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self: str,A_: List[Any],A_: List[Any] ):
'''simple docstring'''
if self.remap is not None:
__UpperCamelCase = indices.reshape(shape[0],-1 ) # add batch axis
__UpperCamelCase = self.unmap_to_all(A_ )
__UpperCamelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__UpperCamelCase = self.embedding(A_ )
if shape is not None:
__UpperCamelCase = z_q.view(A_ )
# reshape back to match original input shape
__UpperCamelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: Any,A_: Any=False ):
'''simple docstring'''
__UpperCamelCase = parameters
__UpperCamelCase, __UpperCamelCase = torch.chunk(A_,2,dim=1 )
__UpperCamelCase = torch.clamp(self.logvar,-3_0.0,2_0.0 )
__UpperCamelCase = deterministic
__UpperCamelCase = torch.exp(0.5 * self.logvar )
__UpperCamelCase = torch.exp(self.logvar )
if self.deterministic:
__UpperCamelCase = __UpperCamelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def snake_case_ ( self: Optional[Any],A_: Optional[torch.Generator] = None ):
'''simple docstring'''
__UpperCamelCase = randn_tensor(
self.mean.shape,generator=A_,device=self.parameters.device,dtype=self.parameters.dtype )
__UpperCamelCase = self.mean + self.std * sample
return x
def snake_case_ ( self: List[Any],A_: Optional[int]=None ):
'''simple docstring'''
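# Closed-form KL for diagonal Gaussians; with other=None this reduces to
# KL(N(mu, var) || N(0, I)) = 0.5 * sum(mu**2 + var - 1 - log(var)).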
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Any=[1, 2, 3] ):
'''simple docstring'''
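# Per-sample Gaussian negative log-likelihood:
# 0.5 * sum(log(2*pi) + log(var) + (x - mu)**2 / var) over the given dims.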
if self.deterministic:
return torch.Tensor([0.0] )
__UpperCamelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=A_ )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return self.mean
| 310
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
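# Masked mean pooling: pad positions are zeroed via the attention mask before
# averaging, and a linear head then projects into the CLIP image-embedding space.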
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
| 310
| 1
|
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def _A ( _lowercase , _lowercase , _lowercase = False ) -> list[float]:
"""simple docstring"""
if radian_mode:
return [magnitude * cos(_lowercase ), magnitude * sin(_lowercase )]
return [magnitude * cos(radians(_lowercase ) ), magnitude * sin(radians(_lowercase ) )]
def _A ( _lowercase , _lowercase , _lowercase = 10**-1 ) -> bool:
"""simple docstring"""
__UpperCamelCase = cross(_lowercase , _lowercase )
__UpperCamelCase = sum(_lowercase )
return abs(_lowercase ) < eps
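# Worked example (hand-checked): polar_force(10, 90) is approximately
# [0.0, 10.0], a 10 N force pointing straight up; equilibrium holds when the
# summed 2D cross products (moments) of location x force stay within eps of 0.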
if __name__ == "__main__":
# Test to check if it works
__snake_case = array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
__snake_case = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__snake_case = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
__snake_case = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__snake_case = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
__snake_case = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 310
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
),
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
| 310
| 1
|
def _A ( _lowercase ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = [0] * len(_lowercase )
__UpperCamelCase = []
__UpperCamelCase = [1] * len(_lowercase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_lowercase ) ):
if indegree[i] == 0:
queue.append(_lowercase )
while queue:
__UpperCamelCase = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCamelCase = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(_lowercase )
print(max(_lowercase ) )
# Adjacency list of Graph
__snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 310
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
| 310
| 1
|
class __lowerCamelCase :
def __init__( self: str,A_: list[int] ):
'''simple docstring'''
__UpperCamelCase = len(A_ )
__UpperCamelCase = [0] * len_array
if len_array > 0:
__UpperCamelCase = array[0]
for i in range(1,A_ ):
__UpperCamelCase = self.prefix_sum[i - 1] + array[i]
def snake_case_ ( self: Tuple,A_: int,A_: int ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def snake_case_ ( self: Union[str, Any],A_: int ):
'''simple docstring'''
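# A contiguous subarray sums to target_sum exactly when two prefix sums differ
# by target_sum; seeding the set with 0 covers subarrays starting at index 0,
# making this an O(n) membership scan.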
__UpperCamelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(A_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(_lowercase , _lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 310
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__snake_case = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser ):
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(terminalreporter , id=make_reports )
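# Note (added for context): pytest discovers `pytest_addoption` and
# `pytest_terminal_summary` purely by name, so placing this conftest.py at the
# test root activates the shared `--make-reports` flag, e.g.
# `pytest --make-reports=run1 tests/` (flag name taken from the code above).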
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = MgpstrTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {}
test_seq2seq = False
def setUp( self ):
'''simple docstring'''
super().setUp()
# fmt: off
vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
vocab_tokens = dict(zip(vocab,range(len(vocab ) ) ) )
self.vocab_file = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '\n' )
def get_tokenizer( self,**kwargs ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**kwargs )
def get_input_output_texts( self,tokenizer ):
'''simple docstring'''
input_text = 'tester'
output_text = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def test_add_special_tokens( self ):
'''simple docstring'''
tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
special_token = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
encoded_special_token = tokenizer.encode([special_token],add_special_tokens=False )
self.assertEqual(len(encoded_special_token ),1 )
decoded = tokenizer.decode(encoded_special_token,skip_special_tokens=True )
self.assertTrue(special_token not in decoded )
def test_internal_consistency( self ):
'''simple docstring'''
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
input_text, output_text = self.get_input_output_texts(tokenizer )
tokens = tokenizer.tokenize(input_text )
ids = tokenizer.convert_tokens_to_ids(tokens )
ids_a = tokenizer.encode(input_text,add_special_tokens=False )
self.assertListEqual(ids,ids_a )
tokens_a = tokenizer.convert_ids_to_tokens(ids )
self.assertNotEqual(len(tokens_a ),0 )
text_a = tokenizer.decode(ids )
self.assertIsInstance(text_a,str )
self.assertEqual(text_a.replace(' ','' ),output_text )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 2_5_6
class __lowerCamelCase (DiffusionPipeline ):
_optional_components = ["""melgan"""]
def __init__( self,notes_encoder: SpectrogramNotesEncoder,continuous_encoder: SpectrogramContEncoder,decoder: TaFilmDecoder,scheduler: DDPMScheduler,melgan: OnnxRuntimeModel if is_onnx_available() else Any,):
'''simple docstring'''
super().__init__()
# From MELGAN
self.min_value = math.log(1E-5 ) # Matches MelGAN training.
self.max_value = 4.0 # Largest value for most examples
self.n_dims = 128
self.register_modules(
notes_encoder=notes_encoder,continuous_encoder=continuous_encoder,decoder=decoder,scheduler=scheduler,melgan=melgan,)
def scale_features( self,features,output_range=(-1.0, 1.0),clip=False ):
'''simple docstring'''
min_out, max_out = output_range
if clip:
features = torch.clip(features,self.min_value,self.max_value )
# Scale to [0, 1].
zero_one = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def scale_to_features( self,outputs,input_range=(-1.0, 1.0),clip=False ):
'''simple docstring'''
min_out, max_out = input_range
outputs = torch.clip(outputs,min_out,max_out ) if clip else outputs
# Scale to [0, 1].
zero_one = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
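# Sketch of the invariant behind the two helpers above (assumed, not asserted
# in the original): for x already inside [self.min_value, self.max_value],
# scale_to_features(scale_features(x)) == x, since one maps the fixed feature
# range onto output_range and the other inverts that affine map.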
def encode( self,input_tokens,continuous_inputs,continuous_mask ):
'''simple docstring'''
tokens_mask = input_tokens > 0
tokens_encoded, tokens_mask = self.notes_encoder(
encoder_input_tokens=input_tokens,encoder_inputs_mask=tokens_mask )
continuous_encoded, continuous_mask = self.continuous_encoder(
encoder_inputs=continuous_inputs,encoder_inputs_mask=continuous_mask )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def decode( self,encodings_and_masks,input_tokens,noise_time ):
'''simple docstring'''
timesteps = noise_time
if not torch.is_tensor(timesteps ):
timesteps = torch.tensor([timesteps],dtype=torch.long,device=input_tokens.device )
elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
timesteps = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps * torch.ones(input_tokens.shape[0],dtype=timesteps.dtype,device=timesteps.device )
logits = self.decoder(
encodings_and_masks=encodings_and_masks,decoder_input_tokens=input_tokens,decoder_noise_time=timesteps )
return logits
@torch.no_grad()
def __call__( self,input_tokens: List[List[int]],generator: Optional[torch.Generator] = None,num_inference_steps: int = 100,return_dict: bool = True,output_type: str = "numpy",callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,callback_steps: int = 1,):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps,int ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(callback_steps )}.''' )
pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims],dtype=np.float32 )
full_pred_mel = np.zeros([1, 0, self.n_dims],np.float32 )
ones = torch.ones((1, TARGET_FEATURE_LENGTH),dtype=bool,device=self.device )
for i, encoder_input_tokens in enumerate(input_tokens ):
if i == 0:
encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device,dtype=self.decoder.dtype )
# The first chunk has no previous context.
encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH),dtype=bool,device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
encoder_continuous_mask = ones
encoder_continuous_inputs = self.scale_features(
encoder_continuous_inputs,output_range=[-1.0, 1.0],clip=True )
encodings_and_masks = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ),continuous_inputs=encoder_continuous_inputs,continuous_mask=encoder_continuous_mask,)
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
x = randn_tensor(
shape=encoder_continuous_inputs.shape,generator=generator,device=self.device,dtype=self.decoder.dtype,)
# set step values
self.scheduler.set_timesteps(num_inference_steps )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
output = self.decode(
encodings_and_masks=encodings_and_masks,input_tokens=x,noise_time=t / self.scheduler.config.num_train_timesteps,)
# Compute previous output: x_t -> x_t-1
x = self.scheduler.step(output,t,x,generator=generator ).prev_sample
mel = self.scale_to_features(x,input_range=[-1.0, 1.0] )
encoder_continuous_inputs = mel[:1]
pred_mel = mel.cpu().float().numpy()
full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]],axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i,full_pred_mel )
logger.info('Generated segment',i )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
output = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=output )
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class DeeRobertaModel(DeeBertModel ):
config_class = RobertaConfig
base_model_prefix = """roberta"""
def __init__( self,config ):
'''simple docstring'''
super().__init__(config )
self.embeddings = RobertaEmbeddings(config )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel ):
config_class = RobertaConfig
base_model_prefix = """roberta"""
def __init__( self,config ):
'''simple docstring'''
super().__init__(config )
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
self.roberta = DeeRobertaModel(config )
self.dropout = nn.Dropout(config.hidden_dropout_prob )
self.classifier = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
text = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class __lowerCamelCase (unittest.TestCase , ToolTesterMixin ):
def setUp( self ):
'''simple docstring'''
self.tool = load_tool('text-question-answering' )
self.tool.setup()
self.remote_tool = load_tool('text-question-answering',remote=True )
def test_exact_match_arg( self ):
'''simple docstring'''
result = self.tool(text,'What did Hugging Face do in April 2021?' )
self.assertEqual(result,'launched the BigScience Research Workshop' )
def test_exact_match_arg_remote( self ):
'''simple docstring'''
result = self.remote_tool(text,'What did Hugging Face do in April 2021?' )
self.assertEqual(result,'launched the BigScience Research Workshop' )
def test_exact_match_kwarg( self ):
'''simple docstring'''
result = self.tool(text=text,question='What did Hugging Face do in April 2021?' )
self.assertEqual(result,'launched the BigScience Research Workshop' )
def test_exact_match_kwarg_remote( self ):
'''simple docstring'''
result = self.remote_tool(text=text,question='What did Hugging Face do in April 2021?' )
self.assertEqual(result,'launched the BigScience Research Workshop' )
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
def longest_common_substring(text1: str , text2: str ) -> str:
"""simple docstring"""
if not (isinstance(text1 , str ) and isinstance(text2 , str )):
raise ValueError('longest_common_substring() takes two strings for inputs' )
text1_length = len(text1 )
text2_length = len(text2 )
dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
ans_index = 0
ans_length = 0
for i in range(1 , text1_length + 1 ):
for j in range(1 , text2_length + 1 ):
if text1[i - 1] == text2[j - 1]:
dp[i][j] = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
ans_index = i
ans_length = dp[i][j]
return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
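# Worked example (illustrative only): longest_common_substring('fish', 'vista')
# fills the DP table, finds the best match of length 2 ending at index 3 of the
# first string, and returns 'is'.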
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (PretrainedConfig ):
model_type = """xlm-roberta"""
def __init__( self,vocab_size=3_0522,hidden_size=768,num_hidden_layers=12,num_attention_heads=12,intermediate_size=3072,hidden_act="gelu",hidden_dropout_prob=0.1,attention_probs_dropout_prob=0.1,max_position_embeddings=512,type_vocab_size=2,initializer_range=0.0_2,layer_norm_eps=1E-12,pad_token_id=1,bos_token_id=0,eos_token_id=2,position_embedding_type="absolute",use_cache=True,classifier_dropout=None,**kwargs,):
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id,bos_token_id=bos_token_id,eos_token_id=eos_token_id,**kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class __lowerCamelCase (OnnxConfig ):
@property
def inputs( self ):
'''simple docstring'''
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (XLMRobertaConfig ):
model_type = """M-CLIP"""
def __init__( self,transformerDimSize=1024,imageDimSize=768,**kwargs ):
'''simple docstring'''
self.transformerDimensions = transformerDimSize
self.numDims = imageDimSize
super().__init__(**kwargs )
class __lowerCamelCase (PreTrainedModel ):
config_class = MCLIPConfig
def __init__( self,config,*args,**kwargs ):
'''simple docstring'''
super().__init__(config,*args,**kwargs )
self.transformer = XLMRobertaModel(config )
self.LinearTransformation = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def forward( self,input_ids,attention_mask ):
'''simple docstring'''
embs = self.transformer(input_ids=input_ids,attention_mask=attention_mask )[0]
embs = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(embs ), embs
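# The forward pass above mean-pools token embeddings under the attention mask:
# padded positions are zeroed before the sum and the division uses the true
# token count per sequence, so padding never dilutes the sentence embedding.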
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (TrainingArguments ):
sortish_sampler: bool = field(default=False , metadata={"""help""": """Whether to use SortishSampler or not."""} )
predict_with_generate: bool = field(
default=False , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
generation_max_length: Optional[int] = field(
default=None , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
generation_num_beams: Optional[int] = field(
default=None , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
default=None , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def to_dict( self ):
'''simple docstring'''
d = super().to_dict()
for k, v in d.items():
if isinstance(v,GenerationConfig ):
d[k] = v.to_dict()
return d
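# to_dict() is overridden so a nested GenerationConfig is itself flattened to a
# plain dict, keeping the serialized training arguments JSON-friendly (e.g. for
# logging integrations that dump the result of to_dict()).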
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int ) -> bool:
"""simple docstring"""
sq = int(number**0.5 )
return number == sq * sq
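# e.g. is_sq(25) -> True, is_sq(26) -> False. The float sqrt is adequate here
# only because this problem keeps the operands small enough to stay exact.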
def add_three(
x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int
) -> tuple[int, int]:
"""simple docstring"""
top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
bottom = x_den * y_den * z_den
hcf = gcd(top , bottom )
top //= hcf
bottom //= hcf
return top, bottom
def solution(order: int = 35 ) -> int:
"""simple docstring"""
unique_s: set = set()
total: Fraction = Fraction(0 )
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
z_num = x_num * y_den + x_den * y_num
z_den = x_den * y_den
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=2
z_num = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
z_den = x_den * x_den * y_den * y_den
if is_sq(z_num ) and is_sq(z_den ):
z_num = int(sqrt(z_num ) )
z_den = int(sqrt(z_den ) )
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=-1
z_num = x_num * y_num
z_den = x_den * y_num + x_num * y_den
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=-2
z_num = x_num * x_num * y_num * y_num
z_den = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(z_num ) and is_sq(z_den ):
z_num = int(sqrt(z_num ) )
z_den = int(sqrt(z_den ) )
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
for num, den in unique_s:
total += Fraction(num , den )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ) -> bool:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
return False
return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fpaa_wrapper = True ):
"""simple docstring"""
options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
is_compiled = is_compiled_module(model )
if is_compiled:
compiled_model = model
model = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(model , options ):
model = model.module
if not keep_fpaa_wrapper:
forward = getattr(model , 'forward' )
original_forward = model.__dict__.pop('_original_forward' , None )
if original_forward is not None:
while hasattr(forward , '__wrapped__' ):
forward = forward.__wrapped__
if forward == original_forward:
break
model.forward = forward
if getattr(model , '_converted_to_transformer_engine' , False ):
convert_model(model , to_transformer_engine=False )
if is_compiled:
compiled_model._orig_mod = model
model = compiled_model
return model
def wait_for_everyone() -> None:
"""simple docstring"""
PartialState().wait_for_everyone()
def save(obj , f ) -> None:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(obj , f )
elif PartialState().local_process_index == 0:
torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
"""simple docstring"""
for key, value in kwargs.items():
os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def get_pretty_name(obj ):
"""simple docstring"""
if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
obj = getattr(obj , '__class__' , obj )
if hasattr(obj , '__qualname__' ):
return obj.__qualname__
if hasattr(obj , '__name__' ):
return obj.__name__
return str(obj )
def merge_dicts(source , destination ):
"""simple docstring"""
for key, value in source.items():
if isinstance(value , dict ):
node = destination.setdefault(key , {} )
merge_dicts(value , node )
else:
destination[key] = value
return destination
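# e.g. merge_dicts({'a': {'b': 1}}, {'a': {'c': 2}}) deep-merges into the
# destination and returns {'a': {'c': 2, 'b': 1}} (hypothetical values).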
def is_port_in_use(port: int = None ) -> bool:
"""simple docstring"""
if port is None:
port = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
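# Port 29500 is the conventional default for the distributed main process; the
# connect_ex probe returns 0 only when something is already listening there.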
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
def __init__( self ):
'''simple docstring'''
self.img = ''
self.original_image = ''
self.last_list = []
self.rem = 0
self.L = 256
self.sk = 0
self.k = 0
self.number_of_rows = 0
self.number_of_cols = 0
def stretch( self,input_image ):
'''simple docstring'''
self.img = cv2.imread(input_image,0 )
self.original_image = copy.deepcopy(self.img )
x, _, _ = plt.hist(self.img.ravel(),256,[0, 256],label='x' )
self.k = np.sum(x )
for i in range(len(x ) ):
prk = x[i] / self.k
self.sk += prk
last = (self.L - 1) * self.sk
if self.rem != 0:
self.rem = last % 1  # fractional part for rounding; upstream's `int(last % last)` is always 0
last = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(last )
self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
self.number_of_cols = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
num = self.img[j][i]
if num != self.last_list[num]:
self.img[j][i] = self.last_list[num]
cv2.imwrite('output_data/output.jpg',self.img )
def plot_histogram( self ):
'''simple docstring'''
plt.hist(self.img.ravel(),256,[0, 256] )
def show_image( self ):
'''simple docstring'''
cv2.imshow('Output-Image',self.img )
cv2.imshow('Input-Image',self.original_image )
cv2.waitKey(5000 )
cv2.destroyAllWindows()
if __name__ == "__main__":
file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
"""simple docstring"""
return (preds == labels).mean()
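# e.g. simple_accuracy(np.array([1, 0, 1]), np.array([1, 1, 1])) -> 2/3
# (illustrative arrays, not from the original script).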
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
config_name: Optional[str] = field(
default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
tokenizer_name: Optional[str] = field(
default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
cache_dir: Optional[str] = field(
default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
max_seq_length: int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
overwrite_cache: bool = field(
default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
"""simple docstring"""
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
try:
processor = processors[data_args.task_name]()
label_list = processor.get_labels()
num_labels = len(label_list )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
train_dataset = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
eval_dataset = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(p: EvalPrediction ) -> Dict:
preds = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(output_eval_file , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , key , value )
writer.write('%s = %s\n' % (key, value) )
results.update(result )
return results
def _mp_fn(index ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase (unittest.TestCase ):
@property
def dummy_uncond_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = UNetaDModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=3,out_channels=3,down_block_types=('DownBlock2D', 'AttnDownBlock2D'),up_block_types=('AttnUpBlock2D', 'UpBlock2D'),)
return model
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
unet = self.dummy_uncond_unet
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet,scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator,num_inference_steps=20,output_type='numpy' ).images
generator = torch.manual_seed(0 )
image_from_tuple = pndm(generator=generator,num_inference_steps=20,output_type='numpy',return_dict=False )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
model_id = 'google/ddpm-cifar10-32'
unet = UNetaDModel.from_pretrained(model_id )
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet,scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator,output_type='numpy' ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import os
def solution() -> int:
"""simple docstring"""
with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
names = str(file.readlines()[0] )
names = names.replace('"' , '' ).split(',' )
names.sort()
total_score = 0
name_score = 0
for i, name in enumerate(names ):
for letter in name:
name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
name_score = 0
return total_score
if __name__ == "__main__":
print(solution())
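# Worked example of the scoring (the classic one from Project Euler 22):
# 'COLIN' has letter value 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in
# the sorted list it contributes 938 * 53 = 49714 to the total.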
from manim import *
class __lowerCamelCase (Scene ):
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = Rectangle(height=0.5,width=0.5 )
__UpperCamelCase = Rectangle(height=0.2_5,width=0.2_5 )
__UpperCamelCase = Rectangle(height=0.4_6,width=0.4_6 ).set_stroke(width=0 )
__UpperCamelCase = [mem.copy() for i in range(6 )]
__UpperCamelCase = [mem.copy() for i in range(6 )]
__UpperCamelCase = VGroup(*A_ ).arrange(A_,buff=0 )
__UpperCamelCase = VGroup(*A_ ).arrange(A_,buff=0 )
__UpperCamelCase = VGroup(A_,A_ ).arrange(A_,buff=0 )
__UpperCamelCase = Text('CPU',font_size=24 )
__UpperCamelCase = Group(A_,A_ ).arrange(A_,buff=0.5,aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
__UpperCamelCase = [mem.copy() for i in range(4 )]
__UpperCamelCase = VGroup(*A_ ).arrange(A_,buff=0 )
__UpperCamelCase = Text('GPU',font_size=24 )
__UpperCamelCase = Group(A_,A_ ).arrange(A_,buff=0.5,aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
__UpperCamelCase = [mem.copy() for i in range(6 )]
__UpperCamelCase = VGroup(*A_ ).arrange(A_,buff=0 )
__UpperCamelCase = Text('Model',font_size=24 )
__UpperCamelCase = Group(A_,A_ ).arrange(A_,buff=0.5,aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
__UpperCamelCase = Rectangle(height=0.4_6 / 4,width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(A_,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ),buff=0.0_2,direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0],direction=A_,buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1],direction=A_,buff=0.0 )
self.add(A_ )
model_cpu_arr.append(A_ )
self.add(*A_,*A_,*A_ )
__UpperCamelCase = [mem.copy() for i in range(6 )]
__UpperCamelCase = VGroup(*A_ ).arrange(A_,buff=0 )
__UpperCamelCase = Text('Loaded Checkpoint',font_size=24 )
__UpperCamelCase = Group(A_,A_ ).arrange(A_,buff=0.5,aligned_edge=A_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
for i, rect in enumerate(A_ ):
__UpperCamelCase = fill.copy().set_fill(A_,opacity=0.7 )
target.move_to(A_ )
ckpt_arr.append(A_ )
__UpperCamelCase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(A_ )
self.add(*A_,*A_ )
__UpperCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCamelCase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''',font_size=18,)
key_text.move_to([-5, 2.4, 0] )
self.add(A_,A_ )
__UpperCamelCase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''',font_size=18,)
blue_text.next_to(A_,DOWN * 2.4,aligned_edge=key_text.get_left() )
self.add(A_ )
__UpperCamelCase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''',font_size=24,)
step_a.move_to([2, 2, 0] )
__UpperCamelCase = [meta_mem.copy() for i in range(6 )]
__UpperCamelCase = [meta_mem.copy() for i in range(6 )]
__UpperCamelCase = VGroup(*A_ ).arrange(A_,buff=0 )
__UpperCamelCase = VGroup(*A_ ).arrange(A_,buff=0 )
__UpperCamelCase = VGroup(A_,A_ ).arrange(A_,buff=0 )
__UpperCamelCase = Text('Disk',font_size=24 )
__UpperCamelCase = Group(A_,A_ ).arrange(A_,buff=0.5,aligned_edge=A_ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(A_,run_time=3 ),Write(A_,run_time=1 ),Create(A_,run_time=1 ) )
__UpperCamelCase = []
for i, rect in enumerate(A_ ):
__UpperCamelCase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(A_,run_time=1.5 ) )
self.play(*A_ )
self.play(FadeOut(A_ ) )
__UpperCamelCase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''',font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_,run_time=3 ) )
self.play(
FadeOut(A_,A_,*A_,*A_ ),)
self.wait()
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
"""simple docstring"""
extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
tokenizer.padding_side = padding_side
return tokenizer(
[line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
"""simple docstring"""
keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
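# Sketch of the effect (pad_token_id = 0 assumed): columns that are padding in
# every row are dropped, e.g. [[1, 2, 0], [3, 0, 0]] -> [[1, 2], [3, 0]].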
class __lowerCamelCase (Dataset ):
def __init__( self,tokenizer,data_dir,max_source_length,max_target_length,type_path="train",n_obs=None,src_lang=None,tgt_lang=None,prefix="",):
'''simple docstring'''
super().__init__()
self.src_file = Path(data_dir ).joinpath(type_path + '.source' )
self.tgt_file = Path(data_dir ).joinpath(type_path + '.target' )
self.src_lens = self.get_char_lens(self.src_file )
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
self.tokenizer = tokenizer
self.prefix = prefix
if n_obs is not None:
self.src_lens = self.src_lens[:n_obs]
self.src_lang = src_lang
self.tgt_lang = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self,index ):
'''simple docstring'''
index = index + 1 # linecache starts at 1
source_line = self.prefix + linecache.getline(str(self.src_file ),index ).rstrip('\n' )
tgt_line = linecache.getline(str(self.tgt_file ),index ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,TaTokenizer ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
source_tokenizer = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,RagTokenizer ) else self.tokenizer
)
target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer,RagTokenizer ) else self.tokenizer
source_inputs = encode_line(source_tokenizer,source_line,self.max_source_length,'right' )
target_inputs = encode_line(target_tokenizer,tgt_line,self.max_target_length,'right' )
source_ids = source_inputs['input_ids'].squeeze()
target_ids = target_inputs['input_ids'].squeeze()
src_mask = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def get_char_lens(data_file ):
'''simple docstring'''
return [len(x ) for x in Path(data_file ).open().readlines()]
def collate_fn( self,batch ):
'''simple docstring'''
input_ids = torch.stack([x['input_ids'] for x in batch] )
masks = torch.stack([x['attention_mask'] for x in batch] )
target_ids = torch.stack([x['decoder_input_ids'] for x in batch] )
tgt_pad_token_id = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,RagTokenizer )
else self.tokenizer.pad_token_id
)
src_pad_token_id = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,RagTokenizer )
else self.tokenizer.pad_token_id
)
y = trim_batch(target_ids,tgt_pad_token_id )
source_ids, source_mask = trim_batch(input_ids,src_pad_token_id,attention_mask=masks )
batch = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List] ):
"""simple docstring"""
return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info(folder_path: str ) -> None:
"""simple docstring"""
repo_infos = get_git_info()
save_json(repo_infos , os.path.join(folder_path , 'git_log.json' ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ):
"""simple docstring"""
with open(path , 'w' ) as f:
json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ):
"""simple docstring"""
with open(path ) as f:
return json.load(f )
def get_git_info() -> Dict:
"""simple docstring"""
repo = git.Repo(search_parent_directories=True )
repo_infos = {
'repo_id': str(repo ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def lmap(f: Callable , x: Iterable ) -> List:
"""simple docstring"""
return list(map(f , x ) )
def pickle_save(obj , path ):
"""simple docstring"""
with open(path , 'wb' ) as f:
return pickle.dump(obj , f )
def normalize_answer(s ):
"""simple docstring"""
def remove_articles(text ):
return re.sub(r'\b(a|an|the)\b' , ' ' , text )
def white_space_fix(text ):
return " ".join(text.split() )
def remove_punc(text ):
exclude = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(text ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction , ground_truth ):
"""simple docstring"""
prediction_tokens = normalize_answer(prediction ).split()
ground_truth_tokens = normalize_answer(ground_truth ).split()
common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
num_same = sum(common.values() )
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens )
recall = 1.0 * num_same / len(ground_truth_tokens )
f1 = (2 * precision * recall) / (precision + recall)
return f1
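# e.g. f1_score('a cat sat', 'the cat sat') -> 1.0, because normalize_answer
# strips articles and both strings reduce to 'cat sat'.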
def exact_match_score(prediction , ground_truth ):
"""simple docstring"""
return normalize_answer(prediction ) == normalize_answer(ground_truth )
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
assert len(_lowercase ) == len(_lowercase )
__UpperCamelCase = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCamelCase = 'dropout_rate'
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
__UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config
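# Illustration of the remapping above (hypothetical hyperparameters): a
# `dropout` value passed for a T5 model is written to the config under the
# equivalent name `dropout_rate`, attributes the config does not know are
# logged and skipped, and every consumed hyperparameter is deleted from the
# namespace so it is not applied a second time downstream.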
| 310
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """data2vec-vision"""
def __init__( self: Union[str, Any],A_: List[Any]=768,A_: List[Any]=12,A_: Union[str, Any]=12,A_: int=3072,A_: Tuple="gelu",A_: List[Any]=0.0,A_: Optional[int]=0.0,A_: Any=0.0_2,A_: Tuple=1E-12,A_: Dict=224,A_: Dict=16,A_: Optional[Any]=3,A_: Tuple=False,A_: Union[str, Any]=False,A_: Tuple=False,A_: Optional[Any]=False,A_: int=0.1,A_: Tuple=0.1,A_: int=True,A_: Tuple=[3, 5, 7, 11],A_: List[str]=[1, 2, 3, 6],A_: Optional[int]=True,A_: List[Any]=0.4,A_: Dict=256,A_: Optional[Any]=1,A_: List[str]=False,A_: Optional[int]=255,**A_: List[str],):
'''simple docstring'''
super().__init__(**A_ )
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = use_mask_token
__UpperCamelCase = use_absolute_position_embeddings
__UpperCamelCase = use_relative_position_bias
__UpperCamelCase = use_shared_relative_position_bias
__UpperCamelCase = layer_scale_init_value
__UpperCamelCase = drop_path_rate
__UpperCamelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
__UpperCamelCase = out_indices
__UpperCamelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
__UpperCamelCase = use_auxiliary_head
__UpperCamelCase = auxiliary_loss_weight
__UpperCamelCase = auxiliary_channels
__UpperCamelCase = auxiliary_num_convs
__UpperCamelCase = auxiliary_concat_input
__UpperCamelCase = semantic_loss_ignore_index
class __lowerCamelCase (_a ):
_lowercase = version.parse("""1.11""" )
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return 1E-4
| 310
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 310
| 1
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class __lowerCamelCase :
def __init__( self: int,A_: Any=None,**A_: Any ):
'''simple docstring'''
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
__UpperCamelCase = model
__UpperCamelCase = kwargs.get('model_save_dir',A_ )
__UpperCamelCase = kwargs.get('latest_model_name',A_ )
def __call__( self: Optional[int],**A_: str ):
'''simple docstring'''
__UpperCamelCase = {k: np.array(A_ ) for k, v in kwargs.items()}
return self.model.run(A_,A_ )
@staticmethod
def snake_case_ ( A_: Union[str, Path],A_: List[str]=None,A_: int=None ):
'''simple docstring'''
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
__UpperCamelCase = 'CPUExecutionProvider'
return ort.InferenceSession(A_,providers=[provider],sess_options=A_ )
def snake_case_ ( self: Optional[Any],A_: Union[str, Path],A_: Optional[str] = None,**A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__UpperCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
__UpperCamelCase = Path(A_ ).joinpath(A_ )
try:
shutil.copyfile(A_,A_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__UpperCamelCase = self.model_save_dir.joinpath(A_ )
if src_path.exists():
__UpperCamelCase = Path(A_ ).joinpath(A_ )
try:
shutil.copyfile(A_,A_ )
except shutil.SameFileError:
pass
def snake_case_ ( self: Dict,A_: Union[str, os.PathLike],**A_: Optional[int],):
'''simple docstring'''
if os.path.isfile(A_ ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(A_,exist_ok=A_ )
# saving model weights/files
self._save_pretrained(A_,**A_ )
@classmethod
def snake_case_ ( cls: Optional[Any],A_: Union[str, Path],A_: Optional[Union[bool, str, None]] = None,A_: Optional[Union[str, None]] = None,A_: bool = False,A_: Optional[str] = None,A_: Optional[str] = None,A_: Optional[str] = None,A_: Optional["ort.SessionOptions"] = None,**A_: Tuple,):
'''simple docstring'''
__UpperCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(A_ ):
__UpperCamelCase = OnnxRuntimeModel.load_model(
os.path.join(A_,A_ ),provider=A_,sess_options=A_ )
__UpperCamelCase = Path(A_ )
# load model from hub
else:
# download model
__UpperCamelCase = hf_hub_download(
repo_id=A_,filename=A_,use_auth_token=A_,revision=A_,cache_dir=A_,force_download=A_,)
__UpperCamelCase = Path(A_ ).parent
__UpperCamelCase = Path(A_ ).name
__UpperCamelCase = OnnxRuntimeModel.load_model(A_,provider=A_,sess_options=A_ )
return cls(model=A_,**A_ )
@classmethod
def snake_case_ ( cls: Dict,A_: Union[str, Path],A_: bool = True,A_: Optional[str] = None,A_: Optional[str] = None,**A_: Optional[int],):
'''simple docstring'''
__UpperCamelCase = None
if len(str(A_ ).split('@' ) ) == 2:
__UpperCamelCase, __UpperCamelCase = model_id.split('@' )
return cls._from_pretrained(
model_id=A_,revision=A_,cache_dir=A_,force_download=A_,use_auth_token=A_,**A_,)
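# Hedged usage sketch (upstream this wrapper is diffusers.OnnxRuntimeModel;
# the methods renamed `snake_case_` here appear to correspond to load_model /
# from_pretrained / save_pretrained, judging by the call sites above. The path
# and input name below are hypothetical):
#
#   sess = OnnxRuntimeModel.load_model('./onnx-model/model.onnx', provider='CPUExecutionProvider')
#   model = OnnxRuntimeModel(model=sess, model_save_dir='./onnx-model', latest_model_name='model.onnx')
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))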
| 310
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__snake_case = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def _A ( _lowercase = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__UpperCamelCase = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
__UpperCamelCase = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
__UpperCamelCase = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 310
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = '''▁'''
__snake_case = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__snake_case = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
__snake_case = {
'''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
__snake_case = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = ["""input_ids""", """attention_mask"""]
_lowercase = []
_lowercase = []
def __init__( self: Optional[int],A_: Tuple,A_: Dict="<s>",A_: Any="</s>",A_: Union[str, Any]="</s>",A_: Union[str, Any]="<s>",A_: Any="<unk>",A_: Optional[Any]="<pad>",A_: int="<mask>",A_: str=None,A_: Optional[Any]=None,A_: Tuple=None,A_: Optional[Dict[str, Any]] = None,A_: Tuple=None,A_: Dict=False,**A_: Union[str, Any],):
'''simple docstring'''
__UpperCamelCase = AddedToken(A_,lstrip=A_,rstrip=A_ ) if isinstance(A_,A_ ) else mask_token
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase = legacy_behaviour
super().__init__(
bos_token=A_,eos_token=A_,unk_token=A_,sep_token=A_,cls_token=A_,pad_token=A_,mask_token=A_,tokenizer_file=A_,src_lang=A_,tgt_lang=A_,additional_special_tokens=A_,sp_model_kwargs=self.sp_model_kwargs,legacy_behaviour=A_,**A_,)
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
__UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 tokens
__UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCamelCase = 1
__UpperCamelCase = len(self.sp_model )
__UpperCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A_ )
}
__UpperCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
__UpperCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__UpperCamelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__UpperCamelCase = src_lang if src_lang is not None else 'eng_Latn'
__UpperCamelCase = self.lang_code_to_id[self._src_lang]
__UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
__UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self: Any,A_: List[str] ):
'''simple docstring'''
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self,'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def snake_case_ ( self: Any,A_: str ):
'''simple docstring'''
__UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ ( self: Any,A_: List[int],A_: Optional[List[int]] = None,A_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_,token_ids_a=A_,already_has_special_tokens=A_ )
__UpperCamelCase = [1] * len(self.prefix_tokens )
__UpperCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def snake_case_ ( self: int,A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ ( self: Union[str, Any],A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self: Optional[int],A_: Tuple,A_: str,A_: Optional[str],A_: Optional[str],**A_: Optional[Any] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase = src_lang
__UpperCamelCase = self(A_,add_special_tokens=A_,return_tensors=A_,**A_ )
__UpperCamelCase = self.convert_tokens_to_ids(A_ )
__UpperCamelCase = tgt_lang_id
return inputs
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self: Any,A_: str ):
'''simple docstring'''
return self.sp_model.encode(A_,out_type=A_ )
def snake_case_ ( self: Optional[Any],A_: Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCamelCase = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self: Optional[Any],A_: Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
__UpperCamelCase = ''.join(A_ ).replace('▁',' ' ).strip()  # drop the SentencePiece word-boundary marker
return out_string
def snake_case_ ( self: List[str],A_: str,A_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCamelCase = os.path.join(
A_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_,'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def snake_case_ ( self: Tuple,A_: List[str],A_: str = "eng_Latn",A_: Optional[List[str]] = None,A_: str = "fra_Latn",**A_: int,):
'''simple docstring'''
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(A_,A_,**A_ )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_ ( self: int,A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__UpperCamelCase = []
__UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
__UpperCamelCase = [self.cur_lang_code]
__UpperCamelCase = [self.eos_token_id]
def snake_case_ ( self: str,A_: str ):
'''simple docstring'''
__UpperCamelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__UpperCamelCase = []
__UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
__UpperCamelCase = [self.cur_lang_code]
__UpperCamelCase = [self.eos_token_id]
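# Hedged usage sketch (upstream class name: NllbTokenizer; checkpoint taken
# from the pretrained map earlier in this file, sentence arbitrary):
#
#   tok = NllbTokenizer.from_pretrained('facebook/nllb-200-distilled-600M',
#                                       src_lang='eng_Latn', tgt_lang='fra_Latn')
#   batch = tok('Hello world', return_tensors='pt')
#   # with legacy_behaviour=False (the default here), source sequences are
#   # prefixed with the eng_Latn code and suffixed with </s>; see
#   # set_src_lang_special_tokens above.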
| 310
|
def _A ( _lowercase ) -> list:
"""simple docstring"""
def merge(_lowercase , _lowercase ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(_lowercase ) <= 1:
return collection
__UpperCamelCase = len(_lowercase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
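# Quick sanity checks for merge_sort (hypothetical inputs; the input list is
# left unmodified and any mutually comparable items work):
#
#   merge_sort([5, 2, 9, 2])   # -> [2, 2, 5, 9]
#   merge_sort([])             # -> []
#   merge_sort(['b', 'a'])     # -> ['a', 'b']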
| 310
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
_lowercase = """funnel"""
_lowercase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self: Tuple,A_: str=3_0522,A_: Optional[Any]=[4, 4, 4],A_: Optional[Any]=None,A_: Optional[int]=2,A_: Tuple=768,A_: str=12,A_: List[Any]=64,A_: Dict=3072,A_: int="gelu_new",A_: Tuple=0.1,A_: Any=0.1,A_: Tuple=0.0,A_: Optional[int]=0.1,A_: Optional[int]=None,A_: Any=1E-9,A_: Any="mean",A_: str="relative_shift",A_: Dict=True,A_: List[Any]=True,A_: List[Any]=True,**A_: List[Any],):
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = block_sizes
__UpperCamelCase = [1] * len(A_ ) if block_repeats is None else block_repeats
assert len(A_ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__UpperCamelCase = num_decoder_layers
__UpperCamelCase = d_model
__UpperCamelCase = n_head
__UpperCamelCase = d_head
__UpperCamelCase = d_inner
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = initializer_range
__UpperCamelCase = initializer_std
__UpperCamelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__UpperCamelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__UpperCamelCase = attention_type
__UpperCamelCase = separate_cls
__UpperCamelCase = truncate_seq
__UpperCamelCase = pool_q_only
super().__init__(**A_ )
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
return sum(self.block_sizes )
@num_hidden_layers.setter
def snake_case_ ( self: Tuple,A_: List[Any] ):
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return len(self.block_sizes )
@num_blocks.setter
def snake_case_ ( self: int,A_: str ):
'''simple docstring'''
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
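# Illustration of the derived properties above (hypothetical sizes): with
# block_sizes=[4, 4, 4], num_hidden_layers is sum(block_sizes) == 12 and
# num_blocks is len(block_sizes) == 3; both setters raise NotImplementedError
# on purpose, steering callers toward configuring block_sizes directly.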
| 310
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
self.assertDictEqual(MockClass(a=2,b=A_ ).to_kwargs(),{'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 )
AcceleratorState._reset_state()
__UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
self.assertEqual(scaler._enabled,A_ )
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
__snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
__snake_case = torch.nn.Linear(1_0_0, 2_0_0)
__snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
__snake_case = ''''''
__snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310
| 1
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor"""]
_lowercase = """SamImageProcessor"""
def __init__( self: int,A_: str ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = self.image_processor
__UpperCamelCase = -10
__UpperCamelCase = self.image_processor.size['longest_edge']
def __call__( self: List[Any],A_: Tuple=None,A_: List[str]=None,A_: Any=None,A_: str=None,A_: Optional[Union[str, TensorType]] = None,**A_: Tuple,):
'''simple docstring'''
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_,)
# pop arguments that are not used in the forward pass but are still needed here
__UpperCamelCase = encoding_image_processor['original_sizes']
if hasattr(A_,'numpy' ): # Checks if Torch or TF tensor
__UpperCamelCase = original_sizes.numpy()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self._check_and_preprocess_points(
input_points=A_,input_labels=A_,input_boxes=A_,)
__UpperCamelCase = self._normalize_and_convert(
A_,A_,input_points=A_,input_labels=A_,input_boxes=A_,return_tensors=A_,)
return encoding_image_processor
def snake_case_ ( self: Union[str, Any],A_: str,A_: str,A_: Optional[int]=None,A_: List[Any]=None,A_: Optional[int]=None,A_: List[str]="pt",):
'''simple docstring'''
if input_points is not None:
if len(A_ ) != len(A_ ):
__UpperCamelCase = [
self._normalize_coordinates(self.target_size,A_,original_sizes[0] ) for point in input_points
]
else:
__UpperCamelCase = [
self._normalize_coordinates(self.target_size,A_,A_ )
for point, original_size in zip(A_,A_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__UpperCamelCase, __UpperCamelCase = self._pad_points_and_labels(A_,A_ )
__UpperCamelCase = np.array(A_ )
if input_labels is not None:
__UpperCamelCase = np.array(A_ )
if input_boxes is not None:
if len(A_ ) != len(A_ ):
__UpperCamelCase = [
self._normalize_coordinates(self.target_size,A_,original_sizes[0],is_bounding_box=A_ )
for box in input_boxes
]
else:
__UpperCamelCase = [
self._normalize_coordinates(self.target_size,A_,A_,is_bounding_box=A_ )
for box, original_size in zip(A_,A_ )
]
__UpperCamelCase = np.array(A_ )
if input_boxes is not None:
if return_tensors == "pt":
__UpperCamelCase = torch.from_numpy(A_ )
# boxes batch size of 1 by default
__UpperCamelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__UpperCamelCase = tf.convert_to_tensor(A_ )
# boxes batch size of 1 by default
__UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__UpperCamelCase = torch.from_numpy(A_ )
# point batch size of 1 by default
__UpperCamelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__UpperCamelCase = tf.convert_to_tensor(A_ )
# point batch size of 1 by default
__UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__UpperCamelCase = torch.from_numpy(A_ )
# point batch size of 1 by default
__UpperCamelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__UpperCamelCase = tf.convert_to_tensor(A_ )
# point batch size of 1 by default
__UpperCamelCase = tf.expand_dims(A_,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def snake_case_ ( self: Union[str, Any],A_: Tuple,A_: int ):
'''simple docstring'''
__UpperCamelCase = max([point.shape[0] for point in input_points] )
__UpperCamelCase = []
for i, point in enumerate(A_ ):
if point.shape[0] != expected_nb_points:
__UpperCamelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
__UpperCamelCase = np.append(input_labels[i],[self.point_pad_value] )
processed_input_points.append(A_ )
__UpperCamelCase = processed_input_points
return input_points, input_labels
def snake_case_ ( self: Dict,A_: int,A_: np.ndarray,A_: Optional[Any],A_: Optional[Any]=False ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = original_size
__UpperCamelCase, __UpperCamelCase = self.image_processor._get_preprocess_shape(A_,longest_edge=A_ )
__UpperCamelCase = deepcopy(A_ ).astype(A_ )
if is_bounding_box:
__UpperCamelCase = coords.reshape(-1,2,2 )
__UpperCamelCase = coords[..., 0] * (new_w / old_w)
__UpperCamelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__UpperCamelCase = coords.reshape(-1,4 )
return coords
def snake_case_ ( self: int,A_: Dict=None,A_: List[str]=None,A_: List[str]=None,):
'''simple docstring'''
if input_points is not None:
if hasattr(A_,'numpy' ): # Checks for TF or Torch tensor
__UpperCamelCase = input_points.numpy().tolist()
if not isinstance(A_,A_ ) or not isinstance(input_points[0],A_ ):
raise ValueError('Input points must be a list of lists of floating points.' )
__UpperCamelCase = [np.array(A_ ) for input_point in input_points]
else:
__UpperCamelCase = None
if input_labels is not None:
if hasattr(A_,'numpy' ):
__UpperCamelCase = input_labels.numpy().tolist()
if not isinstance(A_,A_ ) or not isinstance(input_labels[0],A_ ):
raise ValueError('Input labels must be a list of lists of integers.' )
__UpperCamelCase = [np.array(A_ ) for label in input_labels]
else:
__UpperCamelCase = None
if input_boxes is not None:
if hasattr(A_,'numpy' ):
__UpperCamelCase = input_boxes.numpy().tolist()
if (
not isinstance(A_,A_ )
or not isinstance(input_boxes[0],A_ )
or not isinstance(input_boxes[0][0],A_ )
):
raise ValueError('Input boxes must be a list of lists of lists of floating points.' )
__UpperCamelCase = [np.array(A_ ).astype(np.floataa ) for box in input_boxes]
else:
__UpperCamelCase = None
return input_points, input_labels, input_boxes
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(A_ ) )
def snake_case_ ( self: List[Any],*A_: List[Any],**A_: List[str] ):
'''simple docstring'''
return self.image_processor.post_process_masks(*A_,**A_ )
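# Hedged usage sketch (upstream class name: SamProcessor; checkpoint and point
# coordinates are illustrative):
#
#   from PIL import Image
#   processor = SamProcessor.from_pretrained('facebook/sam-vit-base')
#   image = Image.new('RGB', (640, 480))
#   inputs = processor(image, input_points=[[[320, 240]]], return_tensors='pt')
#   # points are rescaled from the original (640, 480) size to the processor's
#   # longest_edge target via _normalize_coordinates before batching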
| 310
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """OwlViTImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
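# Hedged usage sketch (upstream class name: OwlViTProcessor; checkpoint
# illustrative):
#
#   processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
#   inputs = processor(text=[['a cat', 'a dog'], ['a tree']],
#                      images=[image_a, image_b], return_tensors='pt')
#   # the shorter query list is padded with ' ' to the batch maximum of two
#   # queries per image before tokenization, as implemented above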
| 310
| 1
|
from math import sqrt
def _A ( _lowercase ) -> bool:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (
number >= 0
), "'number' must been an int and positive"
__UpperCamelCase = True
# 0 and 1 are not primes.
if number <= 1:
__UpperCamelCase = False
for divisor in range(2 , int(round(sqrt(_lowercase ) ) ) + 1 ):
# if 'number' is divisible by 'divisor' then set 'status'
# to False and break out of the loop.
if number % divisor == 0:
__UpperCamelCase = False
break
# precondition
assert isinstance(_lowercase , _lowercase ), "'status' must been from type bool"
return status
def _A ( _lowercase ) -> Optional[int]:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__UpperCamelCase = list(range(2 , n + 1 ) )
__UpperCamelCase = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(_lowercase ) ):
for j in range(i + 1 , len(_lowercase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__UpperCamelCase = 0
# filters actual prime numbers.
__UpperCamelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list"
return ans
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (n > 2), "'N' must been an int and > 2"
__UpperCamelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_lowercase ):
ans.append(_lowercase )
# precondition
assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list"
return ans
def _A ( _lowercase ) -> Any:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and number >= 0, "'number' must been an int and >= 0"
__UpperCamelCase = [] # this list will be returned by the function.
# potential prime number factors.
__UpperCamelCase = 2
__UpperCamelCase = number
if number == 0 or number == 1:
ans.append(_lowercase )
# if 'number' is not prime, build the prime factorization of 'number'
elif not is_prime(_lowercase ):
while quotient != 1:
if is_prime(_lowercase ) and (quotient % factor == 0):
ans.append(_lowercase )
quotient /= factor
else:
factor += 1
else:
ans.append(_lowercase )
# precondition
assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list"
return ans
def _A ( _lowercase ) -> str:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__UpperCamelCase = 0
# prime factorization of 'number'
__UpperCamelCase = prime_factorization(_lowercase )
__UpperCamelCase = max(_lowercase )
# precondition
assert isinstance(_lowercase , _lowercase ), "'ans' must been from type int"
return ans
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__UpperCamelCase = 0
# prime factorization of 'number'
__UpperCamelCase = prime_factorization(_lowercase )
__UpperCamelCase = min(_lowercase )
# precondition
assert isinstance(_lowercase , _lowercase ), "'ans' must been from type int"
return ans
def _A ( _lowercase ) -> bool:
"""simple docstring"""
assert isinstance(_lowercase , int ), "'number' must been an int"
assert isinstance(_lowercase % 2 == 0 , bool ), "compare must been from type bool"
return _lowercase % 2 == 0
def _A ( _lowercase ) -> bool:
"""simple docstring"""
assert isinstance(_lowercase , int ), "'number' must been an int"
assert isinstance(_lowercase % 2 != 0 , bool ), "compare must been from type bool"
return _lowercase % 2 != 0
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
assert (
isinstance(_lowercase , _lowercase ) and (number > 2) and is_even(_lowercase )
), "'number' must been an int, even and > 2"
__UpperCamelCase = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
__UpperCamelCase = get_prime_numbers(_lowercase )
__UpperCamelCase = len(_lowercase )
# run variable for while-loops.
__UpperCamelCase = 0
__UpperCamelCase = None
# exit flag, used to break out of the loops
__UpperCamelCase = True
while i < len_pn and loop:
__UpperCamelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__UpperCamelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_lowercase , _lowercase )
and (len(_lowercase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
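# Worked examples for the Goldbach helper above (upstream name: goldbach; it
# returns the first matching prime pair, scanning in ascending order):
#
#   goldbach(16)  # -> [3, 13]
#   goldbach(28)  # -> [5, 23]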
def _A ( _lowercase , _lowercase ) -> str:
"""simple docstring"""
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__UpperCamelCase = 0
while numbera != 0:
__UpperCamelCase = numbera % numbera
__UpperCamelCase = numbera
__UpperCamelCase = rest
# precondition
assert isinstance(_lowercase , _lowercase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__UpperCamelCase = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__UpperCamelCase = prime_factorization(_lowercase )
__UpperCamelCase = prime_factorization(_lowercase )
elif numbera == 1 or numbera == 1:
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = max(_lowercase , _lowercase )
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__UpperCamelCase = prime_fac_a.count(_lowercase )
__UpperCamelCase = prime_fac_a.count(_lowercase )
for _ in range(max(_lowercase , _lowercase ) ):
ans *= n
else:
__UpperCamelCase = prime_fac_a.count(_lowercase )
for _ in range(_lowercase ):
ans *= n
done.append(_lowercase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__UpperCamelCase = prime_fac_a.count(_lowercase )
for _ in range(_lowercase ):
ans *= n
done.append(_lowercase )
# precondition
assert isinstance(_lowercase , _lowercase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _A ( _lowercase ) -> List[str]:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'number' must been a positive int"
__UpperCamelCase = 0
__UpperCamelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans is not prime then
# advance to the next prime number.
while not is_prime(_lowercase ):
ans += 1
# precondition
assert isinstance(_lowercase , _lowercase ) and is_prime(
_lowercase ), "'ans' must been a prime number and from type int"
return ans
def _A ( _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
assert (
is_prime(_lowercase ) and is_prime(_lowercase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__UpperCamelCase = p_number_a + 1 # jump to the next number
__UpperCamelCase = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_lowercase ):
number += 1
while number < p_number_a:
ans.append(_lowercase )
number += 1
# fetch the next prime number.
while not is_prime(_lowercase ):
number += 1
# precondition
assert (
isinstance(_lowercase , _lowercase )
and ans[0] != p_number_a
and ans[len(_lowercase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (n >= 1), "'n' must been int and >= 1"
__UpperCamelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_lowercase )
# precondition
assert ans[0] == 1 and ans[len(_lowercase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (
number > 1
), "'number' must been an int and >= 1"
__UpperCamelCase = get_divisors(_lowercase )
# precondition
assert (
isinstance(_lowercase , _lowercase )
and (divisors[0] == 1)
and (divisors[len(_lowercase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__UpperCamelCase = gcd(abs(_lowercase ) , abs(_lowercase ) )
# precondition
assert (
isinstance(_lowercase , _lowercase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'n' must been a int and >= 0"
__UpperCamelCase = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'n' must been an int and >= 0"
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 1 # this will be returned
for _ in range(n - 1 ):
__UpperCamelCase = ans
ans += fiba
__UpperCamelCase = tmp
return ans
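# Quick checks for the number-theory helpers above, using the upstream names
# that survive at call sites in this file (values verified by hand):
#
#   is_prime(97)              # -> True
#   prime_factorization(60)   # -> [2, 2, 3, 5]
#   get_prime_numbers(20)     # -> [2, 3, 5, 7, 11, 13, 17, 19]
#   gcd(24, 36)               # -> 12
#   get_divisors(12)          # -> [1, 2, 3, 4, 6, 12]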
| 310
|
import math
def _A ( _lowercase ) -> int:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
__UpperCamelCase = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , _lowercase ):
for _ in range(_lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case = 0
try:
__snake_case = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310
| 1
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def _A ( _lowercase ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def _A ( _lowercase , _lowercase ) -> XGBClassifier:
"""simple docstring"""
__UpperCamelCase = XGBClassifier()
classifier.fit(_lowercase , _lowercase )
return classifier
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = load_iris()
__UpperCamelCase, __UpperCamelCase = data_handling(_lowercase )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = train_test_split(
_lowercase , _lowercase , test_size=0.25 )
__UpperCamelCase = iris['target_names']
# Create an XGBoost Classifier from the training data
__UpperCamelCase = xgboost(_lowercase , _lowercase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_lowercase , _lowercase , _lowercase , display_labels=_lowercase , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 310
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
| 310
| 1
|
import argparse
import os
import re
__snake_case = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__snake_case = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
__snake_case = re.compile(r'''\s*\(\s*"(\S[^"]+)"''')
def _A ( _lowercase , _lowercase = False ) -> Optional[Any]:
"""simple docstring"""
with open(_lowercase , 'r' , encoding='utf-8' ) as f:
__UpperCamelCase = f.read()
__UpperCamelCase = content.split('\n' )
__UpperCamelCase = []
__UpperCamelCase = 0
while line_idx < len(_lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__UpperCamelCase = len(re.search(r'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__UpperCamelCase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__UpperCamelCase = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__UpperCamelCase = sorted(_lowercase , key=lambda _lowercase : _re_identifier.search(_lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_lowercase ) )
elif "\n".join(_lowercase ) != content:
return True
def _A ( _lowercase = False ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = [os.path.join(_lowercase , _lowercase ) for f in os.listdir(_lowercase ) if f.endswith('.py' )]
__UpperCamelCase = [sort_auto_mapping(_lowercase , overwrite=_lowercase ) for fname in fnames]
if not overwrite and any(_lowercase ):
__UpperCamelCase = [f for f, d in zip(_lowercase , _lowercase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {', '.join(_lowercase )}. Run `make style` to fix'''
' this.' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__snake_case = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
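# Typical invocations (a sketch): running the script without flags rewrites the auto
# mappings in place, while `--check_only` leaves files untouched and raises the
# ValueError above if any mapping is out of order.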
| 310
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = BioGptTokenizer
_lowercase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
__UpperCamelCase = 'lower'
__UpperCamelCase = ['low', 'er</w>']
__UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokens + ['<unk>']
__UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )
@slow
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 310
| 1
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__snake_case = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
__snake_case = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
__snake_case = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
__snake_case = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
__snake_case = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
__snake_case = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
__snake_case = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def _A ( ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase, __UpperCamelCase = randrange(len(_lowercase ) ), randrange(len(_lowercase ) )
__UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
__UpperCamelCase, __UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
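# The lookup `["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]` above maps the
# two comparisons onto an index: 0 when play < oppo, 1 on a tie, 2 when play > oppo.
# A self-contained sketch of the same idiom:
assert ["Loss", "Tie", "Win"][(3 >= 5) + (3 > 5)] == "Loss"
assert ["Loss", "Tie", "Win"][(4 >= 4) + (4 > 4)] == "Tie"
assert ["Loss", "Tie", "Win"][(7 >= 2) + (7 > 2)] == "Win"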
def _A ( _lowercase = 1_00 ) -> str:
"""simple docstring"""
return (generate_random_hand() for _ in range(_lowercase ))
@pytest.mark.parametrize('hand, expected' , _lowercase )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
assert PokerHand(_lowercase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , _lowercase )
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
assert PokerHand(_lowercase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , _lowercase )
def _A ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = PokerHand(_lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , _lowercase )
def _A ( _lowercase , _lowercase ) -> str:
"""simple docstring"""
assert PokerHand(_lowercase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , _lowercase )
def _A ( _lowercase , _lowercase ) -> str:
"""simple docstring"""
assert PokerHand(_lowercase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , _lowercase )
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
assert PokerHand(_lowercase ).compare_with(PokerHand(_lowercase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def _A ( _lowercase , _lowercase , _lowercase ) -> int:
"""simple docstring"""
assert PokerHand(_lowercase ).compare_with(PokerHand(_lowercase ) ) == expected
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = [PokerHand(_lowercase ) for hand in SORTED_HANDS]
__UpperCamelCase = poker_hands.copy()
shuffle(_lowercase )
__UpperCamelCase = chain(sorted(_lowercase ) )
for index, hand in enumerate(_lowercase ):
assert hand == poker_hands[index]
def _A ( ) -> Any:
"""simple docstring"""
__UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=_lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = PokerHand('2C 4S AS 3D 5C' )
__UpperCamelCase = True
__UpperCamelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = os.path.abspath(os.path.dirname(_lowercase ) )
__UpperCamelCase = os.path.join(_lowercase , 'poker_hands.txt' )
with open(_lowercase ) as file_hand:
for line in file_hand:
__UpperCamelCase = line[:14].strip()
__UpperCamelCase = line[15:].strip()
__UpperCamelCase, __UpperCamelCase = PokerHand(_lowercase ), PokerHand(_lowercase )
__UpperCamelCase = player.compare_with(_lowercase )
if output == "Win":
answer += 1
assert answer == 3_76
| 310
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
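# A minimal composition sketch (checkpoint names are illustrative, and the classmethod
# is assumed to keep its upstream transformers name):
#   from transformers import AutoConfig, RagConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
#       AutoConfig.from_pretrained("facebook/bart-large"),
#       n_docs=5,
#   )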
| 310
| 1
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__snake_case = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
__snake_case = 1_0
__snake_case = 2_5_6
def _A ( _lowercase ) -> Optional[MinHash]:
"""simple docstring"""
if len(_lowercase ) < MIN_NUM_TOKENS:
return None
__UpperCamelCase = MinHash(num_perm=_lowercase )
for token in set(_lowercase ):
min_hash.update(token.encode() )
return min_hash
def _A ( _lowercase ) -> Set[str]:
"""simple docstring"""
return {t for t in NON_ALPHA.split(_lowercase ) if len(t.strip() ) > 0}
class __lowerCamelCase :
def __init__( self: Any,*,
A_: float = 0.8_5,):
'''simple docstring'''
__UpperCamelCase = duplication_jaccard_threshold
__UpperCamelCase = NUM_PERM
__UpperCamelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold,num_perm=self._num_perm )
__UpperCamelCase = defaultdict(A_ )
def snake_case_ ( self: int,A_: Tuple,A_: MinHash ):
'''simple docstring'''
__UpperCamelCase = self._index.query(A_ )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(A_,A_ )
if len(A_ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A_ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A_ )
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = []
for base, duplicates in self._duplicate_clusters.items():
__UpperCamelCase = [base] + list(A_ )
# reformat the cluster to be a list of dicts
__UpperCamelCase = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(A_ )
return duplicate_clusters
def snake_case_ ( self: List[Any],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_duplicate_clusters()
with open(A_,'w' ) as f:
json.dump(A_,A_ )
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
__UpperCamelCase, __UpperCamelCase = element
__UpperCamelCase = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_lowercase , max_queue_size=1_00_00 ) , chunksize=1_00 , ):
if data is not None:
yield data
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = DuplicationIndex(duplication_jaccard_threshold=_lowercase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowercase ) ) , max_queue_size=1_00 ) ):
di.add(_lowercase , _lowercase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _A ( _lowercase , _lowercase ) -> float:
"""simple docstring"""
__UpperCamelCase = get_tokens(_lowercase )
__UpperCamelCase = get_tokens(_lowercase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
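# Hypothetical mini-example (not part of the pipeline; the strings are made up): the
# exact token-set Jaccard computed above is the quantity that the MinHash signatures
# from get_min_hash approximate probabilistically for the LSH index.
if __name__ == "__main__":
    _doc_a = "def read_config ( path ) : with open ( path ) as handle : return json . load ( handle )"
    _doc_b = "def read_config ( file ) : with open ( file ) as handle : return json . load ( handle )"
    print(jaccard_similarity(_doc_a, _doc_b))  # exact overlap of the two token sets
    _mh_a = get_min_hash([t for t in NON_ALPHA.split(_doc_a) if len(t.strip()) > 0])
    _mh_b = get_min_hash([t for t in NON_ALPHA.split(_doc_b) if len(t.strip()) > 0])
    if _mh_a is not None and _mh_b is not None:
        print(_mh_a.jaccard(_mh_b))  # MinHash estimate of the same quantity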
__snake_case = None
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = []
for elementa in cluster:
__UpperCamelCase = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
__UpperCamelCase = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(_lowercase , _lowercase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__UpperCamelCase = 1
extremes.append(_lowercase )
return extremes
def _A ( _lowercase , _lowercase , _lowercase ) -> int:
"""simple docstring"""
global _shared_dataset
__UpperCamelCase = dataset
__UpperCamelCase = []
__UpperCamelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_lowercase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowercase , _lowercase , ) , total=len(_lowercase ) , ):
extremes_list.append(_lowercase )
return extremes_list
def _A ( _lowercase , _lowercase = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
"""simple docstring"""
__UpperCamelCase = make_duplicate_clusters(_lowercase , _lowercase )
__UpperCamelCase = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
__UpperCamelCase = {}
__UpperCamelCase = find_extremes(_lowercase , _lowercase , _lowercase )
for extremes in extremes_clusters:
for element in extremes:
__UpperCamelCase = element
__UpperCamelCase = duplicate_indices - set(extreme_dict.keys() )
__UpperCamelCase = dataset.filter(lambda _lowercase , _lowercase : idx not in remove_indices , with_indices=_lowercase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__UpperCamelCase = element['base_index'] in extreme_dict
if element["is_extreme"]:
__UpperCamelCase = extreme_dict[element['base_index']]['copies']
print(f'''Original dataset size: {len(_lowercase )}''' )
print(f'''Number of duplicate clusters: {len(_lowercase )}''' )
print(f'''Files in duplicate cluster: {len(_lowercase )}''' )
print(f'''Unique files in duplicate cluster: {len(_lowercase )}''' )
print(f'''Filtered dataset size: {len(_lowercase )}''' )
return ds_filter, duplicate_clusters
| 310
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
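# The pooling in the forward pass above is a masked mean: multiplying by the
# unsqueezed attention mask zeroes the padding positions before the sum over the
# sequence axis, and dividing by attention_mask.sum(dim=1) normalises by the number
# of real tokens, turning (batch, seq, dim) token embeddings into one (batch, dim)
# sentence vector.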
| 310
| 1
|
import math
def _A ( _lowercase , _lowercase ) -> float:
"""simple docstring"""
if (
not isinstance(_lowercase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def _A ( _lowercase , _lowercase ) -> float:
"""simple docstring"""
if (
not isinstance(_lowercase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
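# Worked example of the two formulas above (illustrative numbers): P = S * pf and
# Q = S * sqrt(1 - pf**2), so S = 100 VA at pf = 0.8 gives P = 80 W of real power
# and Q = 60 VAR of reactive power, the classic 3-4-5 power triangle.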
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
),
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
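# Note on the test above: XGLM is decoder-only, so generation continues from each
# row's final position. Right padding would make the model continue from pad tokens,
# whereas left padding keeps every prompt flush against the generation boundary,
# which is why the batched outputs match the per-sentence ones.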
| 310
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
_lowercase = """transfo-xl"""
_lowercase = ["""mems"""]
_lowercase = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self: Tuple,A_: List[str]=26_7735,A_: str=[2_0000, 4_0000, 20_0000],A_: int=1024,A_: List[Any]=1024,A_: Optional[Any]=16,A_: List[str]=64,A_: Union[str, Any]=4096,A_: Union[str, Any]=4,A_: List[Any]=False,A_: List[str]=18,A_: Tuple=1600,A_: str=1000,A_: Tuple=True,A_: List[str]=True,A_: Optional[Any]=0,A_: List[str]=-1,A_: List[str]=True,A_: int=0.1,A_: Dict=0.0,A_: List[Any]=True,A_: Dict="normal",A_: Optional[int]=0.0_1,A_: List[Any]=0.0_1,A_: int=0.0_2,A_: str=1E-5,A_: Optional[int]=0,**A_: Optional[Any],):
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = []
self.cutoffs.extend(A_ )
if proj_share_all_but_first:
__UpperCamelCase = [False] + [True] * len(self.cutoffs )
else:
__UpperCamelCase = [False] + [False] * len(self.cutoffs )
__UpperCamelCase = d_model
__UpperCamelCase = d_embed
__UpperCamelCase = d_head
__UpperCamelCase = d_inner
__UpperCamelCase = div_val
__UpperCamelCase = pre_lnorm
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
__UpperCamelCase = mem_len
__UpperCamelCase = same_length
__UpperCamelCase = attn_type
__UpperCamelCase = clamp_len
__UpperCamelCase = sample_softmax
__UpperCamelCase = adaptive
__UpperCamelCase = dropout
__UpperCamelCase = dropatt
__UpperCamelCase = untie_r
__UpperCamelCase = init
__UpperCamelCase = init_range
__UpperCamelCase = proj_init_std
__UpperCamelCase = init_std
__UpperCamelCase = layer_norm_epsilon
super().__init__(eos_token_id=A_,**A_ )
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case_ ( self: List[Any],A_: str ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 310
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
| 310
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _A ( _lowercase , _lowercase , _lowercase=10_24 , _lowercase=10_24 , _lowercase=False , **_lowercase ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = SeqaSeqDataset(_lowercase , _lowercase , _lowercase , _lowercase , type_path='train' , **_lowercase )
__UpperCamelCase = tok.pad_token_id
def get_lens(_lowercase ):
__UpperCamelCase = tqdm(
DataLoader(_lowercase , batch_size=5_12 , num_workers=8 , shuffle=_lowercase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__UpperCamelCase = []
for batch in dl:
__UpperCamelCase = batch['input_ids'].ne(_lowercase ).sum(1 ).tolist()
__UpperCamelCase = batch['labels'].ne(_lowercase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(_lowercase , _lowercase ):
max_lens.append(max(_lowercase , _lowercase ) )
else:
max_lens.extend(_lowercase )
return max_lens
__UpperCamelCase = get_lens(_lowercase )
__UpperCamelCase = SeqaSeqDataset(_lowercase , _lowercase , _lowercase , _lowercase , type_path='val' , **_lowercase )
__UpperCamelCase = get_lens(_lowercase )
pickle_save(_lowercase , train_ds.len_file )
pickle_save(_lowercase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
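# Example invocation (paths illustrative; assumes the upstream seq2seq CLI signature):
#   python save_len_file.py t5-small /path/to/data --max_source_length 1024
# which pickles the per-example max token lengths for the train and val splits via
# the pickle_save calls above.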
| 310
|
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(_lowercase , _lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_lowercase )]
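# Quick check of the closed form h(n) = n * (2n - 1) with n starting at 0
# (a sketch; the name follows the call in the demo block below):
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]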
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 310
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
| 310
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
| 1
|
from __future__ import annotations
import requests
__snake_case = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def _A ( _lowercase , _lowercase = 1 , _lowercase = "new" , _lowercase = None ) -> dict:
"""simple docstring"""
__UpperCamelCase = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(_lowercase ) - valid_terms ) ):
__UpperCamelCase = f'''Invalid search term: {invalid_search_terms}'''
raise ValueError(_lowercase )
__UpperCamelCase = requests.get(
f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={'User-agent': 'A random string'} , )
if response.status_code == 4_29:
raise requests.HTTPError
__UpperCamelCase = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(_lowercase )}
__UpperCamelCase = {}
for id_ in range(_lowercase ):
__UpperCamelCase = {
item: data['data']['children'][id_]['data'][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 310
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
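# Early-exit sketch for the forward pass above: every highway head produces its own
# logits; at inference the entropy of each head's logits (via `entropy`) can be
# compared against a threshold to decide whether to stop at that layer, and a
# HighwayException carries the early logits plus the exit layer index back here.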
| 310
| 1
|
def _A ( _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
assert x is not None
assert y is not None
__UpperCamelCase = len(_lowercase )
__UpperCamelCase = len(_lowercase )
# declaring the array for storing the dp values
__UpperCamelCase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
__UpperCamelCase = 1 if x[i - 1] == y[j - 1] else 0
__UpperCamelCase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
__UpperCamelCase = ''
__UpperCamelCase, __UpperCamelCase = m, n
while i > 0 and j > 0:
__UpperCamelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__UpperCamelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
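# Trace of the DP above on a = "AGGTAB", b = "GXTXAYB": l[i][j] holds the LCS length
# of the first i characters of a and the first j characters of b, so l[6][7] == 4,
# and the backtracking loop rebuilds "GTAB" by following diagonal moves wherever the
# characters matched.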
if __name__ == "__main__":
__snake_case = '''AGGTAB'''
__snake_case = '''GXTXAYB'''
__snake_case = 4
__snake_case = '''GTAB'''
__snake_case , __snake_case = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
| 310
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
| 310
| 1
|
from typing import Any
class __lowerCamelCase :
def __init__( self: List[str],A_: Any ):
'''simple docstring'''
__UpperCamelCase = data
__UpperCamelCase = None
def __repr__( self: Optional[Any] ):
'''simple docstring'''
return F'''Node({self.data})'''
class __lowerCamelCase :
def __init__( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = None
def __iter__( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = self.head
while node:
yield node.data
__UpperCamelCase = node.next
def __len__( self: Tuple ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self: Optional[Any] ):
'''simple docstring'''
return "->".join([str(A_ ) for item in self] )
def __getitem__( self: Tuple,A_: int ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: Optional[int],A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
__UpperCamelCase = self.head
for _ in range(A_ ):
__UpperCamelCase = current.next
__UpperCamelCase = data
def snake_case_ ( self: str,A_: Any ):
'''simple docstring'''
self.insert_nth(len(self ),A_ )
def snake_case_ ( self: List[str],A_: Any ):
'''simple docstring'''
self.insert_nth(0,A_ )
def snake_case_ ( self: Tuple,A_: int,A_: Any ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__UpperCamelCase = Node(A_ )
if self.head is None:
__UpperCamelCase = new_node
elif index == 0:
__UpperCamelCase = self.head # link new_node to head
__UpperCamelCase = new_node
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = new_node
def snake_case_ ( self: Union[str, Any] ): # print every node data
'''simple docstring'''
print(self )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return self.delete_nth(0 )
def snake_case_ ( self: Any ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case_ ( self: str,A_: int = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__UpperCamelCase = self.head # default first node
if index == 0:
__UpperCamelCase = self.head.next
else:
__UpperCamelCase = self.head
for _ in range(index - 1 ):
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next
__UpperCamelCase = temp.next.next
return delete_node.data
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return self.head is None
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = self.head
while current:
# Store the current node's next node.
__UpperCamelCase = current.next
# Make the current node's next point backwards
__UpperCamelCase = prev
# Make the previous node be the current node
__UpperCamelCase = current
# Make the current node the next node (to progress iteration)
__UpperCamelCase = next_node
# Return prev in order to put the head at the end
__UpperCamelCase = prev
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase , i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__UpperCamelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__UpperCamelCase = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__UpperCamelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__UpperCamelCase = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__UpperCamelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__UpperCamelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> int:
"""simple docstring"""
from doctest import testmod
testmod()
__UpperCamelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(f'''Element at Position 1: {linked_list[1]}''' )
__UpperCamelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(f'''length of linked_list is : {len(_lowercase )}''' )
if __name__ == "__main__":
main()
| 310
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
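# Added note (not in the original file): the `inputs` property above supplies the
# dynamic axes for ONNX export. A hedged usage sketch, assuming the subclasses
# above carry their upstream names `XLMRobertaConfig` / `XLMRobertaOnnxConfig`:
#   onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig(), task='multiple-choice')
#   onnx_config.inputs
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ...])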
| 310
| 1
|
def _A ( _lowercase , _lowercase , _lowercase ) -> float:
"""simple docstring"""
__UpperCamelCase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
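# Worked check (added comment, not in the original file): for first_term=1,
# common_diff=1, num_of_terms=10 the closed form gives
#   (10 / 2) * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55,
# which matches 1 + 2 + ... + 10, so main() below prints 55.0.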
def _A ( ) -> Optional[int]:
"""simple docstring"""
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(A_,A_ ):
__UpperCamelCase = v.to_dict()
return d
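# Added note (assumption, not in the original file): the override above flattens
# a nested `GenerationConfig` back to a plain dict so the arguments stay
# JSON-serializable, e.g.
#   json.dumps(args.to_dict())   # works even when a GenerationConfig was passed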
| 310
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 310
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(_lowercase , '_dynamo' ):
return False
return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule )
def _A ( _lowercase , _lowercase = True ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__UpperCamelCase = is_compiled_module(_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowercase , _lowercase ):
__UpperCamelCase = model.module
if not keep_fpaa_wrapper:
__UpperCamelCase = getattr(_lowercase , 'forward' )
__UpperCamelCase = model.__dict__.pop('_original_forward' , _lowercase )
if original_forward is not None:
while hasattr(_lowercase , '__wrapped__' ):
__UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
__UpperCamelCase = forward
if getattr(_lowercase , '_converted_to_transformer_engine' , _lowercase ):
convert_model(_lowercase , to_transformer_engine=_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = compiled_model
return model
def _A ( ) -> Any:
"""simple docstring"""
PartialState().wait_for_everyone()
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowercase , _lowercase )
elif PartialState().local_process_index == 0:
torch.save(_lowercase , _lowercase )
@contextmanager
def _A ( **_lowercase ) -> Union[str, Any]:
"""simple docstring"""
for key, value in kwargs.items():
__UpperCamelCase = str(_lowercase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
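# Usage sketch (added; in upstream accelerate the context manager above is named
# `patch_environment` -- that name is obfuscated in this dump):
#   with patch_environment(master_addr='127.0.0.1', master_port='29500'):
#       ...  # MASTER_ADDR / MASTER_PORT are set inside the block, removed after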
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
if not hasattr(_lowercase , '__qualname__' ) and not hasattr(_lowercase , '__name__' ):
__UpperCamelCase = getattr(_lowercase , '__class__' , _lowercase )
if hasattr(_lowercase , '__qualname__' ):
return obj.__qualname__
if hasattr(_lowercase , '__name__' ):
return obj.__name__
return str(_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
for key, value in source.items():
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase = destination.setdefault(_lowercase , {} )
merge_dicts(_lowercase , _lowercase )
else:
__UpperCamelCase = value
return destination
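# Worked example (added comment): `merge_dicts` recursively folds `source` into
# `destination`, so
#   merge_dicts({'a': {'b': 1}}, {'a': {'c': 2}, 'd': 3})
#   # -> {'a': {'c': 2, 'b': 1}, 'd': 3}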
def _A ( _lowercase = None ) -> bool:
"""simple docstring"""
if port is None:
__UpperCamelCase = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
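# Sketch (added; the upstream name of the checker above is `is_port_in_use`):
# probe upward from the default torch.distributed port until a free one is found.
#   port = 29500
#   while is_port_in_use(port):
#       port += 1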
| 310
| 1
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__UpperCamelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__UpperCamelCase = 4
__UpperCamelCase = 48
__UpperCamelCase = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__UpperCamelCase = [6, 6, 6, 6]
__UpperCamelCase = 60
__UpperCamelCase = [6, 6, 6, 6]
__UpperCamelCase = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__UpperCamelCase = 4
__UpperCamelCase = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__UpperCamelCase = 1
__UpperCamelCase = 1
__UpperCamelCase = 1_26
__UpperCamelCase = 7
__UpperCamelCase = 2_55.0
__UpperCamelCase = ''
return config
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
__UpperCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__UpperCamelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__UpperCamelCase = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__UpperCamelCase = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__UpperCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__UpperCamelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__UpperCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__UpperCamelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__UpperCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__UpperCamelCase = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__UpperCamelCase = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__UpperCamelCase = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__UpperCamelCase = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__UpperCamelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__UpperCamelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__UpperCamelCase = 'layernorm.weight'
if name == "norm.bias":
__UpperCamelCase = 'layernorm.bias'
if "conv_first" in name:
__UpperCamelCase = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__UpperCamelCase = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__UpperCamelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__UpperCamelCase = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__UpperCamelCase = name.replace('upsample.2' , 'upsample.convolution_1' )
__UpperCamelCase = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__UpperCamelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__UpperCamelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__UpperCamelCase = 'swin2sr.' + name
return name
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
__UpperCamelCase = key.split('.' )
__UpperCamelCase = int(key_split[1] )
__UpperCamelCase = int(key_split[4] )
__UpperCamelCase = config.embed_dim
if "weight" in key:
__UpperCamelCase = val[:dim, :]
__UpperCamelCase = val[dim : dim * 2, :]
__UpperCamelCase = val[-dim:, :]
else:
__UpperCamelCase = val[:dim]
__UpperCamelCase = val[dim : dim * 2]
__UpperCamelCase = val[-dim:]
pass
else:
__UpperCamelCase = val
return orig_state_dict
def _A ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = get_config(_lowercase )
__UpperCamelCase = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
__UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase , map_location='cpu' )
__UpperCamelCase = convert_state_dict(_lowercase , _lowercase )
__UpperCamelCase, __UpperCamelCase = model.load_state_dict(_lowercase , strict=_lowercase )
if len(_lowercase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
__UpperCamelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
__UpperCamelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__UpperCamelCase = 1_26 if 'Jpeg' in checkpoint_url else 2_56
__UpperCamelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__UpperCamelCase = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
__UpperCamelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
__UpperCamelCase = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__UpperCamelCase = torch.Size([1, 3, 5_12, 5_12] )
__UpperCamelCase = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__UpperCamelCase = torch.Size([1, 3, 10_24, 10_24] )
__UpperCamelCase = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__UpperCamelCase = torch.Size([1, 3, 10_24, 10_24] )
__UpperCamelCase = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__UpperCamelCase = torch.Size([1, 3, 5_12, 5_12] )
__UpperCamelCase = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__UpperCamelCase = torch.Size([1, 3, 10_24, 10_24] )
__UpperCamelCase = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _lowercase , atol=1e-3 )
print('Looks ok!' )
__UpperCamelCase = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__UpperCamelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__snake_case = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 310
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
try:
__UpperCamelCase = processors[data_args.task_name]()
__UpperCamelCase = processor.get_labels()
__UpperCamelCase = len(_lowercase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_lowercase , p.label_ids )}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
return results
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 310
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = b.T
__UpperCamelCase = np.sum(np.square(_lowercase ) , axis=1 )
__UpperCamelCase = np.sum(np.square(_lowercase ) , axis=0 )
__UpperCamelCase = np.matmul(_lowercase , _lowercase )
__UpperCamelCase = aa[:, None] - 2 * ab + ba[None, :]
return d
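# Added check (not in the original file): the function above uses the expansion
# ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2. For small arrays it agrees with the
# direct pairwise computation (`squared_euclidean_distance` is the call-site
# name used below for the obfuscated def above):
#   a, b = np.random.rand(4, 3), np.random.rand(5, 3)
#   direct = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
#   np.allclose(squared_euclidean_distance(a, b), direct)  # True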
def _A ( _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = x.reshape(-1 , 3 )
__UpperCamelCase = squared_euclidean_distance(_lowercase , _lowercase )
return np.argmin(_lowercase , axis=1 )
class __lowerCamelCase (_a ):
_lowercase = ["""pixel_values"""]
def __init__( self: List[Any],A_: Optional[Union[List[List[int]], np.ndarray]] = None,A_: bool = True,A_: Dict[str, int] = None,A_: PILImageResampling = PILImageResampling.BILINEAR,A_: bool = True,A_: bool = True,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(**A_ )
__UpperCamelCase = size if size is not None else {'height': 256, 'width': 256}
__UpperCamelCase = get_size_dict(A_ )
__UpperCamelCase = np.array(A_ ) if clusters is not None else None
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = resample
__UpperCamelCase = do_normalize
__UpperCamelCase = do_color_quantize
def snake_case_ ( self: Optional[int],A_: np.ndarray,A_: Dict[str, int],A_: PILImageResampling = PILImageResampling.BILINEAR,A_: Optional[Union[str, ChannelDimension]] = None,**A_: Dict,):
'''simple docstring'''
__UpperCamelCase = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
A_,size=(size['height'], size['width']),resample=A_,data_format=A_,**A_ )
def snake_case_ ( self: Tuple,A_: np.ndarray,A_: Optional[Union[str, ChannelDimension]] = None,):
'''simple docstring'''
__UpperCamelCase = rescale(image=A_,scale=1 / 1_2_7.5,data_format=A_ )
__UpperCamelCase = image - 1
return image
def snake_case_ ( self: Tuple,A_: ImageInput,A_: bool = None,A_: Dict[str, int] = None,A_: PILImageResampling = None,A_: bool = None,A_: Optional[bool] = None,A_: Optional[Union[List[List[int]], np.ndarray]] = None,A_: Optional[Union[str, TensorType]] = None,A_: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,**A_: Union[str, Any],):
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(A_ )
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCamelCase = clusters if clusters is not None else self.clusters
__UpperCamelCase = np.array(A_ )
__UpperCamelCase = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(A_ ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=A_,size=A_,resample=A_ ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=A_ ) for image in images]
if do_color_quantize:
__UpperCamelCase = [to_channel_dimension_format(A_,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCamelCase = np.array(A_ )
__UpperCamelCase = color_quantize(A_,A_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCamelCase = images.shape[0]
__UpperCamelCase = images.reshape(A_,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCamelCase = list(A_ )
else:
__UpperCamelCase = [to_channel_dimension_format(A_,A_ ) for image in images]
__UpperCamelCase = {'input_ids': images}
return BatchFeature(data=A_,tensor_type=A_ )
| 310
|
import os
def _A ( ) -> Tuple:
"""simple docstring"""
with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
__UpperCamelCase = str(file.readlines()[0] )
__UpperCamelCase = names.replace('"' , '' ).split(',' )
names.sort()
__UpperCamelCase = 0
__UpperCamelCase = 0
for i, name in enumerate(_lowercase ):
for letter in name:
name_score += ord(_lowercase ) - 64
total_score += (i + 1) * name_score
__UpperCamelCase = 0
return total_score
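# Worked example (added comment, from the Project Euler 22 statement): COLIN has
# name score 3 + 15 + 12 + 9 + 14 = 53; at position 938 in the sorted list it
# contributes 938 * 53 = 49714 to the total.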
if __name__ == "__main__":
print(solution())
| 310
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCamelCase (_a ):
def __init__( self: str,A_: VQModel,A_: UNetaDModel,A_: DDIMScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=A_,unet=A_,scheduler=A_ )
@torch.no_grad()
def __call__( self: str,A_: int = 1,A_: Optional[Union[torch.Generator, List[torch.Generator]]] = None,A_: float = 0.0,A_: int = 50,A_: Optional[str] = "pil",A_: bool = True,**A_: int,):
'''simple docstring'''
__UpperCamelCase = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),generator=A_,)
__UpperCamelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__UpperCamelCase = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(A_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
__UpperCamelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__UpperCamelCase = {}
if accepts_eta:
__UpperCamelCase = eta
for t in self.progress_bar(self.scheduler.timesteps ):
__UpperCamelCase = self.scheduler.scale_model_input(A_,A_ )
# predict the noise residual
__UpperCamelCase = self.unet(A_,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
__UpperCamelCase = self.scheduler.step(A_,A_,A_,**A_ ).prev_sample
# decode the image latents with the VAE
__UpperCamelCase = self.vqvae.decode(A_ ).sample
__UpperCamelCase = (image / 2 + 0.5).clamp(0,1 )
__UpperCamelCase = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
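# Usage sketch (added; the class and parameter names above are obfuscated, so
# `LDMPipeline` is a stand-in name and the call is positional):
#   pipe = LDMPipeline(vqvae, unet, scheduler)
#   images = pipe(1, None, 0.0, 50).images   # batch_size, generator, eta, steps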
| 310
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
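# Worked example (added comment): `trim_batch` drops columns that are padding in
# every row. With pad_token_id = 0:
#   input_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#   trim_batch(input_ids, 0)   # -> tensor([[5, 6], [7, 0]])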
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__snake_case = getLogger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
return list(itertools.chain.from_iterable(_lowercase ) )
def _A ( _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) )
def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowercase ) as f:
return json.load(_lowercase )
def _A ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = git.Repo(search_parent_directories=_lowercase )
__UpperCamelCase = {
'repo_id': str(_lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( _lowercase , _lowercase ) -> List:
"""simple docstring"""
return list(map(_lowercase , _lowercase ) )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
def remove_articles(_lowercase ):
return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
__UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
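# Worked example (added comment): lowercase, strip punctuation and articles,
# collapse whitespace:
#   normalize_answer('The Cat!')   # -> 'cat'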
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = Counter(_lowercase ) & Counter(_lowercase )
__UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
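# Worked example (added comment; the def above is name-obfuscated, `f1_score` is
# its upstream name): for 'hello there' vs 'hello world' one of two tokens
# overlaps, so precision = recall = 0.5 and
#   f1_score('hello there', 'hello world')   # -> 0.5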
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return normalize_answer(_lowercase ) == normalize_answer(_lowercase )
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
assert len(_lowercase ) == len(_lowercase )
__UpperCamelCase = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCamelCase = 'dropout_rate'
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
__UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config
| 310
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
__UpperCamelCase = {
'input_ids': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]],dtype=tf.intaa ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]],dtype=tf.intaa ),
}
__UpperCamelCase = model(A_ )['last_hidden_state']
__UpperCamelCase = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape,A_ )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[0.0_6_8_1_7_6_2, 0.1_0_8_9_4_4_5_1, 0.0_6_7_7_2_5_0_4],
[-0.0_6_4_2_3_6_6_8, 0.0_2_3_6_6_6_1_5, 0.0_4_3_2_9_3_4_4],
[-0.0_6_0_5_7_2_9_5, 0.0_9_9_7_4_1_3_5, -0.0_0_0_7_0_5_8_4],
]
],dtype=tf.floataa,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy(),expected_slice.numpy(),atol=1E-4 ) )
| 310
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 310
| 1
|
from __future__ import annotations
import pandas as pd
def _A ( _lowercase , _lowercase , _lowercase ) -> list[int]:
"""simple docstring"""
__UpperCamelCase = [0] * no_of_processes
__UpperCamelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_lowercase ):
__UpperCamelCase = burst_time[i]
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 9_99_99_99_99
__UpperCamelCase = 0
__UpperCamelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_lowercase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__UpperCamelCase = remaining_time[j]
__UpperCamelCase = j
__UpperCamelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__UpperCamelCase = remaining_time[short]
if minm == 0:
__UpperCamelCase = 9_99_99_99_99
if remaining_time[short] == 0:
complete += 1
__UpperCamelCase = False
# Find finish time of current process
__UpperCamelCase = increment_time + 1
# Calculate waiting time
__UpperCamelCase = finish_time - arrival_time[short]
__UpperCamelCase = finar - burst_time[short]
if waiting_time[short] < 0:
__UpperCamelCase = 0
# Increment time
increment_time += 1
return waiting_time
def _A ( _lowercase , _lowercase , _lowercase ) -> list[int]:
"""simple docstring"""
__UpperCamelCase = [0] * no_of_processes
for i in range(_lowercase ):
__UpperCamelCase = burst_time[i] + waiting_time[i]
return turn_around_time
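# Worked example (added comment): for arrival times [0, 1, 2] and burst times
# [3, 1, 2], shortest-remaining-time-first preempts P1 for P2 at t=1, giving
#   calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)        # -> [1, 0, 2]
#   calculate_turnaroundtime([3, 1, 2], 3, [1, 0, 2])     # -> [4, 1, 4]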
def _A ( _lowercase , _lowercase , _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = 0
__UpperCamelCase = 0
for i in range(_lowercase ):
__UpperCamelCase = total_waiting_time + waiting_time[i]
__UpperCamelCase = total_turn_around_time + turn_around_time[i]
print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print(f'''Average turn around time = {total_turn_around_time / no_of_processes:.5f}''' )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
__snake_case = int(input())
__snake_case = [0] * no_of_processes
__snake_case = [0] * no_of_processes
__snake_case = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
__snake_case , __snake_case = map(int, input().split())
__snake_case = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case = burst_time
__snake_case = no_of_processes
__snake_case = waiting_time
__snake_case = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__snake_case = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 310
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__snake_case = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def _A ( _lowercase = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__UpperCamelCase = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
__UpperCamelCase = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
__UpperCamelCase = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 310
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
|
def _A ( _lowercase ) -> list:
"""simple docstring"""
def merge(_lowercase , _lowercase ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(_lowercase ) <= 1:
return collection
__UpperCamelCase = len(_lowercase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
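# A standalone restatement of the same top-down merge sort, kept as a quick
# sanity check (plain names assumed, since the definition above is obfuscated
# and not directly importable).
def merge_sort_demo(collection):
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    left = merge_sort_demo(collection[:mid])
    right = merge_sort_demo(collection[mid:])
    merged = []
    # repeatedly take the smaller head element, then append any leftovers
    while left and right:
        merged.append((left if left[0] <= right[0] else right).pop(0))
    return merged + left + right

assert merge_sort_demo([5, 3, 8, 1, 1]) == [1, 1, 3, 5, 8]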
| 310
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_a ) , """Tatoeba directory does not exist.""" )
class __lowerCamelCase (unittest.TestCase ):
@cached_property
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
self.resolver.convert_models(['heb-eng'] )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = self.resolver.write_model_card('opus-mt-he-en',dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
| 310
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
self.assertDictEqual(MockClass(a=2,b=A_ ).to_kwargs(),{'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 )
AcceleratorState._reset_state()
__UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
self.assertEqual(scaler._enabled,A_ )
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
__snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
__snake_case = torch.nn.Linear(1_0_0, 2_0_0)
__snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
__snake_case = ''''''
__snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310
| 1
|
from __future__ import annotations
def _A ( _lowercase , _lowercase ) -> bool:
"""simple docstring"""
__UpperCamelCase = get_failure_array(_lowercase )
# 2) Step through text searching for pattern
__UpperCamelCase, __UpperCamelCase = 0, 0 # index into text, pattern
while i < len(_lowercase ):
if pattern[j] == text[i]:
if j == (len(_lowercase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
__UpperCamelCase = failure[j - 1]
continue
i += 1
return False
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
__UpperCamelCase = [0]
__UpperCamelCase = 0
__UpperCamelCase = 1
while j < len(_lowercase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
__UpperCamelCase = failure[i - 1]
continue
j += 1
failure.append(_lowercase )
return failure
if __name__ == "__main__":
# Test 1)
__snake_case = '''abc1abc12'''
__snake_case = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__snake_case = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__snake_case = '''ABABX'''
__snake_case = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
__snake_case = '''AAAB'''
__snake_case = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
__snake_case = '''abcdabcy'''
__snake_case = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
__snake_case = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 310
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """OwlViTImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
| 310
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_a )
class __lowerCamelCase (_a ):
_lowercase = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_lowercase = Features({"""audio""": Audio()} )
_lowercase = Features({"""labels""": ClassLabel} )
_lowercase = "audio"
_lowercase = "labels"
def snake_case_ ( self: Dict,A_: Any ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column],A_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
__UpperCamelCase = copy.deepcopy(self )
__UpperCamelCase = self.label_schema.copy()
__UpperCamelCase = features[self.label_column]
__UpperCamelCase = label_schema
return task_template
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 310
|
import math
def _A ( _lowercase ) -> int:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
__UpperCamelCase = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , _lowercase ):
for _ in range(_lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case = 0
try:
__snake_case = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310
| 1
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase (_a ):
_lowercase = (DDPMParallelScheduler,)
def snake_case_ ( self: Tuple,**A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**A_ )
return config
def snake_case_ ( self: str ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1],[0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=A_,beta_end=A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_,prediction_type=A_,sample_max_value=A_,)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def snake_case_ ( self: int ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=A_ )
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
__UpperCamelCase = len(A_ )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = self.dummy_sample_deter + 0.1
__UpperCamelCase = self.dummy_sample_deter - 0.1
__UpperCamelCase = samplea.shape[0]
__UpperCamelCase = torch.stack([samplea, samplea, samplea],dim=0 )
__UpperCamelCase = torch.arange(A_ )[0:3, None].repeat(1,A_ )
__UpperCamelCase = model(samples.flatten(0,1 ),timesteps.flatten(0,1 ) )
__UpperCamelCase = scheduler.batch_step_no_noise(A_,timesteps.flatten(0,1 ),samples.flatten(0,1 ) )
__UpperCamelCase = torch.sum(torch.abs(A_ ) )
__UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
__UpperCamelCase = len(A_ )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
__UpperCamelCase = model(A_,A_ )
# 2. predict previous mean of sample x_t-1
__UpperCamelCase = scheduler.step(A_,A_,A_,generator=A_ ).prev_sample
__UpperCamelCase = pred_prev_sample
__UpperCamelCase = torch.sum(torch.abs(A_ ) )
__UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
__UpperCamelCase = scheduler_class(**A_ )
__UpperCamelCase = len(A_ )
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter
__UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
__UpperCamelCase = model(A_,A_ )
# 2. predict previous mean of sample x_t-1
__UpperCamelCase = scheduler.step(A_,A_,A_,generator=A_ ).prev_sample
__UpperCamelCase = pred_prev_sample
__UpperCamelCase = torch.sum(torch.abs(A_ ) )
__UpperCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
__UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
__UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
__UpperCamelCase = -1
else:
__UpperCamelCase = timesteps[i + 1]
__UpperCamelCase = scheduler.previous_timestep(A_ )
__UpperCamelCase = prev_t.item()
self.assertEqual(A_,A_ )
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
__UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(A_,msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=A_ )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
__UpperCamelCase = [100, 87, 50, 1, 0]
__UpperCamelCase = len(A_ )
with self.assertRaises(A_,msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=A_,timesteps=A_ )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**A_ )
__UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A_,msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}',):
scheduler.set_timesteps(timesteps=A_ )
| 310
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
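# A toy numeric sketch of the softmax-over-scaled-cosine-similarity scoring
# used above, with made-up 2-d embeddings (T plays the temperature role; the
# module above applies this over 4-d batches, collapsed here for readability).
import torch

cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-08)
softmax = torch.nn.Softmax(dim=-1)
queries = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
supports = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
T = 1
scores = softmax(T * cos(queries.unsqueeze(1), supports.unsqueeze(0)))
print(scores)  # each row ~ [0.731, 0.269] or [0.269, 0.731]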
| 310
| 1
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """AutoImageProcessor"""
_lowercase = """AutoTokenizer"""
def __init__( self: Union[str, Any],A_: Any,A_: Optional[int] ):
'''simple docstring'''
super().__init__(A_,A_ )
__UpperCamelCase = self.image_processor
def __call__( self: str,A_: Union[str, Any]=None,A_: List[Any]=None,A_: Union[str, Any]=None,**A_: List[str] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__UpperCamelCase = self.tokenizer(A_,return_tensors=A_,**A_ )
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: List[Any],*A_: str,**A_: Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: Tuple,*A_: List[str],**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 310
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = BioGptTokenizer
_lowercase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
__UpperCamelCase = 'lower'
__UpperCamelCase = ['low', 'er</w>']
__UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokens + ['<unk>']
__UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )
@slow
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 310
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
_lowercase = """roberta"""
def __init__( self: Dict,A_: Optional[Any]=5_0265,A_: Union[str, Any]=768,A_: List[Any]=12,A_: int=12,A_: Optional[int]=3072,A_: str="gelu",A_: Optional[int]=0.1,A_: List[Any]=0.1,A_: Tuple=512,A_: Optional[Any]=2,A_: Union[str, Any]=0.0_2,A_: Dict=1E-12,A_: Union[str, Any]=1,A_: Union[str, Any]=0,A_: Any=2,A_: Tuple="absolute",A_: Union[str, Any]=True,A_: Optional[Any]=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 310
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with a distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
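# A minimal usage sketch of the composite-config pattern above via the public
# transformers names (assumes transformers is installed; the sub-config choices
# and keyword values are illustrative, not prescribed by this snippet).
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()
generator_config = BartConfig()
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, max_combined_length=300
)
print(rag_config.to_dict()['n_docs'])  # -> 5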
| 310
| 1
|
import math
def _A ( _lowercase ) -> int:
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
__UpperCamelCase = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , _lowercase ):
for _ in range(_lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case = 0
try:
__snake_case = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 310
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
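# A toy numeric sketch of the attention-mask mean pooling used in the forward
# above: padded positions (mask == 0) are excluded from the average (values
# here are made up).
import torch

embs = torch.tensor([[[2.0, 4.0], [6.0, 8.0], [9.0, 9.0]]])  # (batch, seq, dim)
attention_mask = torch.tensor([[1.0, 1.0, 0.0]])             # last token is padding
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled)  # tensor([[4., 6.]])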
| 310
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __lowerCamelCase (unittest.TestCase ):
def __init__( self: Union[str, Any],A_: Optional[int],A_: Optional[int]=7,A_: List[str]=3,A_: Union[str, Any]=18,A_: Optional[int]=30,A_: Union[str, Any]=400,A_: Optional[Any]=True,A_: List[str]=32,A_: Any=True,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = min_resolution
__UpperCamelCase = max_resolution
__UpperCamelCase = do_resize
__UpperCamelCase = size_divisor
__UpperCamelCase = do_rescale
def snake_case_ ( self: int ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = GLPNImageProcessor if is_vision_available() else None
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = GLPNImageProcessingTester(self )
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_,'do_resize' ) )
self.assertTrue(hasattr(A_,'size_divisor' ) )
self.assertTrue(hasattr(A_,'resample' ) )
self.assertTrue(hasattr(A_,'do_rescale' ) )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_,Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_,np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_,torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
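# A plain-integer sketch of the size_divisor contract exercised by the tests
# above: output heights/widths must be multiples of size_divisor, here modelled
# as flooring to the nearest multiple (sample dimensions are arbitrary).
size_divisor = 32
height, width = 401, 300
new_height = (height // size_divisor) * size_divisor
new_width = (width // size_divisor) * size_divisor
assert new_height % size_divisor == 0 and new_width % size_divisor == 0
print(new_height, new_width)  # -> 384 288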
| 310
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
),
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
| 310
| 1
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__snake_case = 2
class __lowerCamelCase :
def __init__( self: Union[str, Any],*, # begin keyword-only arguments
A_: int="<s>",A_: str="<pad>",A_: Union[str, Any]="</s>",A_: Tuple="<unk>",A_: Tuple=None,):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = bos, unk, pad, eos
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = {}
__UpperCamelCase = self.add_symbol(A_ )
__UpperCamelCase = self.add_symbol(A_ )
__UpperCamelCase = self.add_symbol(A_ )
__UpperCamelCase = self.add_symbol(A_ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(A_ )
__UpperCamelCase = len(self.symbols )
def __eq__( self: int,A_: Any ):
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self: Union[str, Any] ):
'''simple docstring'''
return len(self.symbols )
def __contains__( self: List[Any],A_: Tuple ):
'''simple docstring'''
return sym in self.indices
@classmethod
def snake_case_ ( cls: int,A_: List[str] ):
'''simple docstring'''
__UpperCamelCase = cls()
d.add_from_file(A_ )
return d
def snake_case_ ( self: Dict,A_: Optional[Any],A_: Any=1,A_: List[str]=False ):
'''simple docstring'''
if word in self.indices and not overwrite:
__UpperCamelCase = self.indices[word]
__UpperCamelCase = self.count[idx] + n
return idx
else:
__UpperCamelCase = len(self.symbols )
__UpperCamelCase = idx
self.symbols.append(A_ )
self.count.append(A_ )
return idx
def snake_case_ ( self: Union[str, Any],A_: Tuple ):
'''simple docstring'''
return 0
def snake_case_ ( self: Optional[Any],A_: Optional[int] ):
'''simple docstring'''
if isinstance(A_,A_ ):
try:
with open(A_,'r',encoding='utf-8' ) as fd:
self.add_from_file(A_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(A_ ) )
return
__UpperCamelCase = f.readlines()
__UpperCamelCase = self._load_meta(A_ )
for line in lines[indices_start_line:]:
try:
__UpperCamelCase, __UpperCamelCase = line.rstrip().rsplit(' ',1 )
if field == "#fairseq:overwrite":
__UpperCamelCase = True
__UpperCamelCase, __UpperCamelCase = line.rsplit(' ',1 )
else:
__UpperCamelCase = False
__UpperCamelCase = int(A_ )
__UpperCamelCase = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(A_ ) )
self.add_symbol(A_,n=A_,overwrite=A_ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = dict((re.sub(r'@@$' , '' , _lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowercase ), v) for k, v in d.items() )
__UpperCamelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
__UpperCamelCase = d[k] # restore
return da
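# A toy illustration of the key-rewrite rule above, restated standalone with a
# tiny made-up vocab: "@@"-suffixed subwords drop the marker, everything else
# gains "</w>" (special tokens are then restored, as in the function above).
import re

toy_vocab = {'low@@': 0, 'er': 1}
rewritten = dict(
    (re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v)
    for k, v in toy_vocab.items()
)
print(rewritten)  # -> {'low': 0, 'er</w>': 1}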
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
if not os.path.exists(_lowercase ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
__UpperCamelCase = os.path.join(_lowercase , 'checkpoint.pt' )
if not os.path.isfile(_lowercase ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
__UpperCamelCase = torch.load(_lowercase , map_location='cpu' )
__UpperCamelCase = chkpt['cfg']['model']
# dicts
__UpperCamelCase = os.path.join(_lowercase , 'dict.txt' )
if not os.path.isfile(_lowercase ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
__UpperCamelCase = Dictionary.load(_lowercase )
__UpperCamelCase = rewrite_dict_keys(src_dict.indices )
__UpperCamelCase = len(_lowercase )
__UpperCamelCase = os.path.join(_lowercase , VOCAB_FILES_NAMES['vocab_file'] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# merges_file (bpecodes)
__UpperCamelCase = os.path.join(_lowercase , 'bpecodes' )
if not os.path.isfile(_lowercase ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
__UpperCamelCase = os.path.join(_lowercase , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(_lowercase , _lowercase )
# model config
__UpperCamelCase = os.path.join(_lowercase , 'config.json' )
__UpperCamelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# tokenizer config
__UpperCamelCase = os.path.join(_lowercase , _lowercase )
__UpperCamelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 10_24,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) )
# model
__UpperCamelCase = chkpt['model']
# remove unneeded keys
__UpperCamelCase = [
'decoder.version',
]
for k in ignore_keys:
        model_state_dict.pop(k , None )
__UpperCamelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
__UpperCamelCase = model_state_dict.pop(_lowercase )
else:
__UpperCamelCase = model_state_dict.pop(_lowercase )
__UpperCamelCase = BioGptConfig.from_pretrained(_lowercase )
__UpperCamelCase = BioGptForCausalLM(_lowercase )
# check that it loads ok
model_new.load_state_dict(_lowercase )
# save
__UpperCamelCase = os.path.join(_lowercase , _lowercase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowercase , _lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
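# A minimal sketch of the vocab-key rewrite performed above, applied to an
# illustrative toy fairseq dictionary; `toy_dict` and `rewritten` are
# hypothetical names, not part of the converter.
import re
toy_dict = {'hel@@': 5, 'lo': 6}
rewritten = dict(
    (re.sub(r'@@$', '', k), v) if k.endswith('@@') else (k + '</w>', v)
    for k, v in toy_dict.items()
)
# -> {'hel': 5, 'lo</w>': 6}: BPE continuations lose the '@@' marker and whole
# words gain the '</w>' end-of-word marker, matching the rewrite helper above.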
| 310
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
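# A minimal sketch of scoring one sentence outside the parameterized harness,
# assuming the public facebook/wmt19-en-ru checkpoint; the sample sentence and
# the function name are illustrative.
def translate_one_demo(sentence: str = 'Machine learning is great.') -> str:
    tokenizer = FSMTTokenizer.from_pretrained('facebook/wmt19-en-ru')
    model = FSMTForConditionalGeneration.from_pretrained('facebook/wmt19-en-ru')
    batch = tokenizer([sentence], return_tensors='pt')
    outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]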
| 310
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__snake_case = logging.get_logger(__name__)
@dataclass
class __lowerCamelCase :
def __init__( self: Tuple,A_: List[Any]=False,A_: Tuple=False,A_: str=6.0,A_: Tuple=None,A_: Tuple=False,A_: Tuple=False,A_: str=None,A_: Optional[Any]="fp4",A_: Optional[Any]=False,**A_: Any,):
'''simple docstring'''
__UpperCamelCase = load_in_abit
__UpperCamelCase = load_in_abit
__UpperCamelCase = llm_inta_threshold
__UpperCamelCase = llm_inta_skip_modules
__UpperCamelCase = llm_inta_enable_fpaa_cpu_offload
__UpperCamelCase = llm_inta_has_fpaa_weight
__UpperCamelCase = bnb_abit_quant_type
__UpperCamelCase = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
__UpperCamelCase = torch.floataa
elif isinstance(A_,A_ ):
__UpperCamelCase = getattr(A_,A_ )
elif isinstance(A_,torch.dtype ):
__UpperCamelCase = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def snake_case_ ( self: List[str] ):
'''simple docstring'''
if not isinstance(self.llm_inta_threshold,A_ ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules,A_ ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload,A_ ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight,A_ ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype,torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type,A_ ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant,A_ ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
return self.load_in_abit or self.load_in_abit
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def snake_case_ ( cls: Union[str, Any],A_: List[Any],A_: Optional[Any],**A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = cls(**A_ )
__UpperCamelCase = []
for key, value in kwargs.items():
if hasattr(A_,A_ ):
setattr(A_,A_,A_ )
to_remove.append(A_ )
for key in to_remove:
kwargs.pop(A_,A_ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def snake_case_ ( self: Any,A_: Union[str, os.PathLike] ):
'''simple docstring'''
with open(A_,'w',encoding='utf-8' ) as writer:
__UpperCamelCase = self.to_dict()
__UpperCamelCase = json.dumps(A_,indent=2,sort_keys=A_ ) + '\n'
writer.write(A_ )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self: str ):
'''simple docstring'''
return F'''{self.__class__.__name__} {self.to_json_string()}'''
def snake_case_ ( self: int,A_: bool = True ):
'''simple docstring'''
if use_diff is True:
__UpperCamelCase = self.to_diff_dict()
else:
__UpperCamelCase = self.to_dict()
return json.dumps(A_,indent=2,sort_keys=A_ ) + "\n"
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.to_dict()
# get the default config dict
__UpperCamelCase = BitsAndBytesConfig().to_dict()
__UpperCamelCase = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
__UpperCamelCase = value
return serializable_config_dict
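# A minimal construction sketch, assuming this class corresponds to the public
# transformers BitsAndBytesConfig; the parameter values are illustrative.
def bnb_config_demo() -> str:
    from transformers import BitsAndBytesConfig
    cfg = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type='nf4',
        bnb_4bit_compute_dtype='bfloat16',  # string form is resolved via getattr(torch, ...)
    )
    return cfg.to_json_string(use_diff=True)  # serializes only non-default fields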
| 310
|
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(_lowercase , _lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
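    # A quick worked check of the closed form n * (2n - 1): the first five
    # hexagonal numbers are 0, 1, 6, 15, and 28.
    assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]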
| 310
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 310
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
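# A minimal round-trip sketch mirroring the test above, using the toy
# character vocab from setUp; the input string and function name are
# illustrative (the decoded output may be space-separated, which the test
# above accounts for with a replace).
def roundtrip_demo(tokenizer: MgpstrTokenizer, text: str = 'tester') -> str:
    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
    return tokenizer.decode(ids)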
| 310
| 1
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
__snake_case = {
'''b0''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_2_4,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_4_0,
'''dropout_rate''': 0.2,
'''dw_padding''': [1_6],
},
'''b2''': {
'''hidden_dim''': 1_4_0_8,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_6_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 1_6],
},
'''b3''': {
'''hidden_dim''': 1_5_3_6,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_0_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 1_8],
},
'''b4''': {
'''hidden_dim''': 1_7_9_2,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_8_0,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_0_4_8,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_5_6,
'''dropout_rate''': 0.4,
'''dw_padding''': [1_3, 2_7],
},
'''b6''': {
'''hidden_dim''': 2_3_0_4,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_2_8,
'''dropout_rate''': 0.5,
'''dw_padding''': [3_1],
},
'''b7''': {
'''hidden_dim''': 2_5_6_0,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_0_0,
'''dropout_rate''': 0.5,
'''dw_padding''': [1_8],
},
}
def _A ( _lowercase ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = EfficientNetConfig()
__UpperCamelCase = CONFIG_MAP[model_name]['hidden_dim']
__UpperCamelCase = CONFIG_MAP[model_name]['width_coef']
__UpperCamelCase = CONFIG_MAP[model_name]['depth_coef']
__UpperCamelCase = CONFIG_MAP[model_name]['image_size']
__UpperCamelCase = CONFIG_MAP[model_name]['dropout_rate']
__UpperCamelCase = CONFIG_MAP[model_name]['dw_padding']
__UpperCamelCase = 'huggingface/label-files'
__UpperCamelCase = 'imagenet-1k-id2label.json'
__UpperCamelCase = 10_00
__UpperCamelCase = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='dataset' ) , 'r' ) )
    __UpperCamelCase = {int(k ): v for k, v in idalabel.items()}
__UpperCamelCase = idalabel
__UpperCamelCase = {v: k for k, v in idalabel.items()}
return config
def _A ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = CONFIG_MAP[model_name]['image_size']
__UpperCamelCase = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=_lowercase , )
return preprocessor
def _A ( _lowercase ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
__UpperCamelCase = sorted(set(_lowercase ) )
__UpperCamelCase = len(_lowercase )
    __UpperCamelCase = {b: str(i ) for b, i in zip(_lowercase , range(_lowercase ) )}
__UpperCamelCase = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
__UpperCamelCase = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
__UpperCamelCase = {}
for item in rename_keys:
if item[0] in original_param_names:
__UpperCamelCase = 'efficientnet.' + item[1]
__UpperCamelCase = 'classifier.weight'
__UpperCamelCase = 'classifier.bias'
return key_mapping
def _A ( _lowercase , _lowercase , _lowercase ) -> List[str]:
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
__UpperCamelCase = key_mapping[key]
if "_conv" in key and "kernel" in key:
__UpperCamelCase = torch.from_numpy(_lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
__UpperCamelCase = torch.from_numpy(_lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
__UpperCamelCase = torch.from_numpy(np.transpose(_lowercase ) )
else:
__UpperCamelCase = torch.from_numpy(_lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_lowercase )
@torch.no_grad()
def _A ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = model_classes[model_name](
include_top=_lowercase , weights='imagenet' , input_tensor=_lowercase , input_shape=_lowercase , pooling=_lowercase , classes=10_00 , classifier_activation='softmax' , )
__UpperCamelCase = original_model.trainable_variables
__UpperCamelCase = original_model.non_trainable_variables
__UpperCamelCase = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
__UpperCamelCase = param.numpy()
__UpperCamelCase = list(tf_params.keys() )
# Load HuggingFace model
__UpperCamelCase = get_efficientnet_config(_lowercase )
__UpperCamelCase = EfficientNetForImageClassification(_lowercase ).eval()
__UpperCamelCase = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
__UpperCamelCase = rename_keys(_lowercase )
replace_params(_lowercase , _lowercase , _lowercase )
# Initialize preprocessor and preprocess input image
__UpperCamelCase = convert_image_processor(_lowercase )
__UpperCamelCase = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
__UpperCamelCase = hf_model(**_lowercase )
__UpperCamelCase = outputs.logits.detach().numpy()
# Original model inference
__UpperCamelCase = False
__UpperCamelCase = CONFIG_MAP[model_name]['image_size']
__UpperCamelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
__UpperCamelCase = image.img_to_array(_lowercase )
__UpperCamelCase = np.expand_dims(_lowercase , axis=0 )
__UpperCamelCase = original_model.predict(_lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_lowercase , _lowercase , atol=1e-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(_lowercase ):
os.mkdir(_lowercase )
# Save converted model and image processor
hf_model.save_pretrained(_lowercase )
preprocessor.save_pretrained(_lowercase )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
__UpperCamelCase = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_lowercase )
hf_model.push_to_hub(_lowercase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
__snake_case = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
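# A minimal sketch of the TF-to-PyTorch conv kernel layout change applied in
# the parameter-copying logic above; the kernel shape is illustrative.
def kernel_permute_demo():
    tf_kernel = np.zeros((3, 3, 16, 32))             # Keras layout: (H, W, in, out)
    pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
    assert tuple(pt_kernel.shape) == (32, 16, 3, 3)  # PyTorch layout: (out, in, H, W)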
| 310
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
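# A minimal sketch of the confidence signal behind the early exits above: the
# softmax entropy of a head's logits (low entropy means a confident head, so
# exiting early is cheap and safe). This illustrates the idea only; it is not
# the exact imported entropy helper.
def exit_entropy_demo(logits):
    probs = nn.functional.softmax(logits, dim=-1)
    return -(probs * probs.log()).sum(dim=-1)  # per-example entropy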
| 310
| 1
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
__snake_case = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
__snake_case = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = ''' Hello world! cécé herlolip'''
__snake_case = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def _A ( _lowercase , _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = dct.pop(_lowercase )
__UpperCamelCase = val
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = torch.load(_lowercase , map_location='cpu' )
__UpperCamelCase = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
hub_interface.model.load_state_dict(sd['model'] )
return hub_interface
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase, __UpperCamelCase = emb.weight.shape
__UpperCamelCase = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
__UpperCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def _A ( _lowercase , _lowercase , _lowercase=None ) -> Tuple:
"""simple docstring"""
if not os.path.exists(_lowercase ):
__UpperCamelCase = torch.hub.load('pytorch/fairseq' , _lowercase ).eval()
else:
__UpperCamelCase = load_xsum_checkpoint(_lowercase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__UpperCamelCase = checkpoint_path.replace('.' , '-' )
__UpperCamelCase = BartConfig.from_pretrained(_lowercase )
__UpperCamelCase = bart.encode(_lowercase ).unsqueeze(0 )
__UpperCamelCase = BartTokenizer.from_pretrained(_lowercase ).encode(_lowercase , return_tensors='pt' ).unsqueeze(0 )
if not torch.eq(_lowercase , _lowercase ).all():
raise ValueError(
f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
__UpperCamelCase = bart.state_dict()
remove_ignore_keys_(_lowercase )
__UpperCamelCase = state_dict['model.decoder.embed_tokens.weight']
for src, dest in mnli_rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
__UpperCamelCase = BartForSequenceClassification(_lowercase ).eval()
model.load_state_dict(_lowercase )
__UpperCamelCase = bart.predict('mnli' , _lowercase , return_logits=_lowercase )
__UpperCamelCase = model(_lowercase )[0] # logits
else: # no classification heads to worry about
__UpperCamelCase = bart.model.state_dict()
remove_ignore_keys_(_lowercase )
__UpperCamelCase = state_dict['decoder.embed_tokens.weight']
__UpperCamelCase = bart.extract_features(_lowercase )
if hf_checkpoint_name == "facebook/bart-large":
__UpperCamelCase = BartModel(_lowercase ).eval()
model.load_state_dict(_lowercase )
__UpperCamelCase = model(_lowercase ).model[0]
else:
__UpperCamelCase = BartForConditionalGeneration(_lowercase ).eval() # an existing summarization ckpt
model.model.load_state_dict(_lowercase )
if hasattr(_lowercase , 'lm_head' ):
__UpperCamelCase = make_linear_from_emb(model.model.shared )
__UpperCamelCase = model.model(_lowercase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
__snake_case = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
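# A minimal sketch of the weight-tying trick used by the embedding-to-linear
# helper above; the embedding sizes are illustrative.
def tied_head_demo():
    emb = nn.Embedding(10, 4)
    head = nn.Linear(4, 10, bias=False)
    head.weight.data = emb.weight.data  # shared, not copied: updates propagate
    return head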
| 310
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
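# A minimal sketch of the end-user call pattern exercised above, assuming the
# public facebook/detr-resnet-50 checkpoint; the threshold value is illustrative.
def detection_demo(threshold: float = 0.9):
    detector = pipeline('object-detection', model='facebook/detr-resnet-50')
    return detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=threshold)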
| 310
| 1
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = prime_factors(_lowercase )
if is_square_free(_lowercase ):
return -1 if len(_lowercase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
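    # A few worked values of the Mobius function implemented above (here named
    # _A), assuming prime_factors(1) returns an empty list:
    # mu(1) = 1, mu(4) = 0 (square factor), mu(6) = 1, mu(30) = -1.
    assert [_A(n) for n in (1, 4, 6, 30)] == [1, 0, 1, -1]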
| 310
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 310
| 1
|
import numpy as np
import datasets
__snake_case = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
__snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
__snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase (datasets.Metric ):
def snake_case_ ( self: str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float',id='sequence' ),id='X' ),
} ),)
def snake_case_ ( self: List[str],A_: Optional[Any],A_: int ):
'''simple docstring'''
__UpperCamelCase = np.array(A_ )
__UpperCamelCase = np.array(A_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('Expected `X` to be a 2D vector' )
if len(reference_distribution.shape ) != 2:
raise ValueError('Expected `reference_distribution` to be a 2D vector' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )
# Get mahalanobis distance for each prediction
__UpperCamelCase = X - np.mean(A_ )
__UpperCamelCase = np.cov(reference_distribution.T )
try:
__UpperCamelCase = np.linalg.inv(A_ )
except np.linalg.LinAlgError:
__UpperCamelCase = np.linalg.pinv(A_ )
__UpperCamelCase = np.dot(A_,A_ )
__UpperCamelCase = np.dot(A_,X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 310
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(A_,A_ ):
__UpperCamelCase = v.to_dict()
return d
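# A minimal construction sketch, assuming this class corresponds to the public
# transformers Seq2SeqTrainingArguments; field values are illustrative.
def seq2seq_args_demo():
    from transformers import Seq2SeqTrainingArguments
    return Seq2SeqTrainingArguments(
        output_dir='out',
        predict_with_generate=True,   # run generate() during evaluation
        generation_max_length=128,
        generation_num_beams=4,
    )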
| 310
| 1
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__snake_case = datasets.utils.logging.get_logger(__name__)
__snake_case = ['''names''', '''prefix''']
__snake_case = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__snake_case = ['''encoding_errors''', '''on_bad_lines''']
__snake_case = ['''date_format''']
@dataclass
class __lowerCamelCase (datasets.BuilderConfig ):
_lowercase = ","
_lowercase = None
_lowercase = "infer"
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = True
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = False
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = True
_lowercase = True
_lowercase = False
_lowercase = True
_lowercase = None
_lowercase = "."
_lowercase = None
_lowercase = '"'
_lowercase = 0
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = None
_lowercase = True
_lowercase = True
_lowercase = 0
_lowercase = True
_lowercase = False
_lowercase = None
_lowercase = 1_0000
_lowercase = None
_lowercase = "strict"
_lowercase = "error"
_lowercase = None
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
if self.delimiter is not None:
__UpperCamelCase = self.delimiter
if self.column_names is not None:
__UpperCamelCase = self.column_names
@property
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(),pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __lowerCamelCase (datasets.ArrowBasedBuilder ):
_lowercase = CsvConfig
def snake_case_ ( self: Dict ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def snake_case_ ( self: Union[str, Any],A_: Optional[Any] ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__UpperCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A_,(str, list, tuple) ):
__UpperCamelCase = data_files
if isinstance(A_,A_ ):
__UpperCamelCase = [files]
            __UpperCamelCase = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN,gen_kwargs={'files': files} )]
__UpperCamelCase = []
for split_name, files in data_files.items():
if isinstance(A_,A_ ):
__UpperCamelCase = [files]
            __UpperCamelCase = [dl_manager.iter_files(file ) for file in files]
splits.append(datasets.SplitGenerator(name=A_,gen_kwargs={'files': files} ) )
return splits
def snake_case_ ( self: List[Any],A_: pa.Table ):
'''simple docstring'''
if self.config.features is not None:
__UpperCamelCase = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
# cheaper cast
__UpperCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema],schema=A_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__UpperCamelCase = table_cast(A_,A_ )
return pa_table
def snake_case_ ( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__UpperCamelCase = (
{
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
for name, dtype, feature in zip(schema.names,schema.types,self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ):
__UpperCamelCase = pd.read_csv(A_,iterator=A_,dtype=A_,**self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(A_ ):
__UpperCamelCase = pa.Table.from_pandas(A_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A_ )
except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
raise
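# A minimal end-user sketch of the builder above via the public
# datasets.load_dataset entry point; the file path and separator are
# illustrative and flow into CsvConfig as builder kwargs.
def load_csv_demo():
    import datasets
    return datasets.load_dataset('csv', data_files='data.csv', sep=';')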
| 310
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(_lowercase , '_dynamo' ):
return False
return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule )
def _A ( _lowercase , _lowercase = True ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__UpperCamelCase = is_compiled_module(_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowercase , _lowercase ):
__UpperCamelCase = model.module
if not keep_fpaa_wrapper:
__UpperCamelCase = getattr(_lowercase , 'forward' )
__UpperCamelCase = model.__dict__.pop('_original_forward' , _lowercase )
if original_forward is not None:
while hasattr(_lowercase , '__wrapped__' ):
__UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
__UpperCamelCase = forward
if getattr(_lowercase , '_converted_to_transformer_engine' , _lowercase ):
convert_model(_lowercase , to_transformer_engine=_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = compiled_model
return model
def _A ( ) -> Any:
"""simple docstring"""
PartialState().wait_for_everyone()
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowercase , _lowercase )
elif PartialState().local_process_index == 0:
torch.save(_lowercase , _lowercase )
@contextmanager
def _A ( **_lowercase ) -> Union[str, Any]:
"""simple docstring"""
for key, value in kwargs.items():
        __UpperCamelCase = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
if not hasattr(_lowercase , '__qualname__' ) and not hasattr(_lowercase , '__name__' ):
__UpperCamelCase = getattr(_lowercase , '__class__' , _lowercase )
if hasattr(_lowercase , '__qualname__' ):
return obj.__qualname__
if hasattr(_lowercase , '__name__' ):
return obj.__name__
return str(_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
for key, value in source.items():
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase = destination.setdefault(_lowercase , {} )
merge_dicts(_lowercase , _lowercase )
else:
__UpperCamelCase = value
return destination
def _A ( _lowercase = None ) -> bool:
"""simple docstring"""
if port is None:
__UpperCamelCase = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
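# Hedged usage sketch (not part of the original file): `patch_environment`
# exports its keyword arguments (upper-cased) for the duration of the block
# and removes them again on exit.
with patch_environment(master_port=29501):
    assert os.environ["MASTER_PORT"] == "29501"
assert "MASTER_PORT" not in os.environ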
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Exact Gaussian Error Linear Unit, via the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the variant popularized by GPT/BERT)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Faster tanh approximation of GELU."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to [-10, 10] (useful for quantization)."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis` and gate one half."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
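# Hedged usage sketch (not part of the original file): resolve an activation
# by name and apply it elementwise. GELU is close to 0 for large negative
# inputs and close to the identity for large positive ones.
act = get_tf_activation("gelu")
print(act(tf.constant([-3.0, 0.0, 3.0])).numpy())  # approx. [-0.004  0.     2.996]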
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """Mean elementwise equality between predictions and gold labels."""
    return (preds == labels).mean()
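# Worked example (hedged, not part of the original script): with
# preds = np.array([0, 1, 1]) and labels = np.array([0, 0, 1]), two of the
# three positions agree, so simple_accuracy(preds, labels) == 2 / 3.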
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
try:
__UpperCamelCase = processors[data_args.task_name]()
__UpperCamelCase = processor.get_labels()
__UpperCamelCase = len(_lowercase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_lowercase , p.label_ids )}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
return results
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
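# Hedged usage sketch (not part of the original file): applying the rules to
# a toy parameter tree. Every leaf must be matched by some rule, otherwise
# the assert in set_partitions fires.
toy_params = {"transformer": {"wte": {"embedding": 0.0}}, "ln_f": {"bias": 0.0}}
toy_specs = set_partitions(toy_params)
# toy_specs["transformer"]["wte"]["embedding"] == P("mp", None)
# toy_specs["ln_f"]["bias"] is None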
import os


def solution() -> int:
    """Sum of each name's alphabetical value times its 1-based position in
    the sorted list of names from p022_names.txt (Project Euler 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
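# Worked example (hedged, not part of the original script), taken from the
# Project Euler 22 statement: COLIN scores 3 + 15 + 12 + 9 + 14 = 53, and at
# position 938 in the sorted list it contributes 938 * 53 = 49714.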
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Elementwise rectified linear unit: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and a ground-truth answer."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
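# Worked example (hedged, not part of the original file): with ground truth
# "the cat sat" and prediction "a cat sat down", normalize_answer() strips
# the articles, leaving ["cat", "sat"] vs ["cat", "sat", "down"]. The
# overlap is 2 tokens, so precision = 2/3, recall = 1, and F1 = 0.8.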
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase = -1
__UpperCamelCase = ids_tensor((1, 5),vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase = model.generate(A_,max_new_tokens=10,do_sample=A_ )
__UpperCamelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCamelCase = TextStreamer(A_ )
model.generate(A_,max_new_tokens=10,do_sample=A_,streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCamelCase = cs.out[:-1]
self.assertEqual(A_,A_ )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase = -1
__UpperCamelCase = ids_tensor((1, 5),vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase = model.generate(A_,max_new_tokens=10,do_sample=A_ )
__UpperCamelCase = tokenizer.decode(greedy_ids[0] )
__UpperCamelCase = TextIteratorStreamer(A_ )
__UpperCamelCase = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__UpperCamelCase = Thread(target=model.generate,kwargs=A_ )
thread.start()
__UpperCamelCase = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A_,A_ )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase = -1
__UpperCamelCase = ids_tensor((1, 5),vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase = model.generate(A_,max_new_tokens=10,do_sample=A_ )
__UpperCamelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCamelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCamelCase = TextStreamer(A_,skip_prompt=A_ )
model.generate(A_,max_new_tokens=10,do_sample=A_,streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCamelCase = cs.out[:-1]
self.assertEqual(A_,A_ )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('distilgpt2' )
__UpperCamelCase = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(A_ )
__UpperCamelCase = -1
__UpperCamelCase = torch.ones((1, 5),device=A_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCamelCase = TextStreamer(A_,skip_special_tokens=A_ )
model.generate(A_,max_new_tokens=1,do_sample=A_,streamer=A_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCamelCase = cs.out[:-1] # Remove the final "\n"
__UpperCamelCase = tokenizer(A_,return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape,(1, 1) )
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase = -1
__UpperCamelCase = ids_tensor((1, 5),vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase = TextIteratorStreamer(A_,timeout=0.0_0_1 )
__UpperCamelCase = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__UpperCamelCase = Thread(target=model.generate,kwargs=A_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A_ ):
__UpperCamelCase = ''
for new_text in streamer:
streamer_text += new_text
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort.

    >>> merge_sort([5, 2, 4, 1])
    [1, 2, 4, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
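# Note (hedged, not part of the original file): this merge sort is stable,
# because `_merge` pops from `left` on ties (`<=`), and it runs in
# O(n log n) time with O(n) extra space for the slices.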
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Minimum-cost path from the top-left to the bottom-right corner,
    moving only right or down (the matrix is updated in place).

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
        cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
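# Worked example (hedged, not part of the original file): the helper above
# L2-normalises both embedding matrices and returns pairwise cosine
# similarities, so identical rows score 1.0:
#   e = jnp.array([[3.0, 4.0]])   # norm 5 -> normalised to [0.6, 0.8]
#   jax_cosine_distance(e, e)     # -> [[1.0]]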
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCamelCase = nn.Dense(self.config.projection_dim,use_bias=A_,dtype=self.dtype )
__UpperCamelCase = self.param('concept_embeds',jax.nn.initializers.ones,(17, self.config.projection_dim) )
__UpperCamelCase = self.param(
'special_care_embeds',jax.nn.initializers.ones,(3, self.config.projection_dim) )
__UpperCamelCase = self.param('concept_embeds_weights',jax.nn.initializers.ones,(17,) )
__UpperCamelCase = self.param('special_care_embeds_weights',jax.nn.initializers.ones,(3,) )
def __call__( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.vision_model(A_ )[1]
__UpperCamelCase = self.visual_projection(A_ )
__UpperCamelCase = jax_cosine_distance(A_,self.special_care_embeds )
__UpperCamelCase = jax_cosine_distance(A_,self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCamelCase = 0.0
__UpperCamelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCamelCase = jnp.round(A_,3 )
__UpperCamelCase = jnp.any(special_scores > 0,axis=1,keepdims=A_ )
# Use a lower threshold if an image has any special care concept
__UpperCamelCase = is_special_care * 0.0_1
__UpperCamelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCamelCase = jnp.round(A_,3 )
__UpperCamelCase = jnp.any(concept_scores > 0,axis=1 )
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: List[Any],A_: CLIPConfig,A_: Optional[Tuple] = None,A_: int = 0,A_: jnp.dtype = jnp.floataa,A_: bool = True,**A_: List[Any],):
'''simple docstring'''
if input_shape is None:
__UpperCamelCase = (1, 224, 224, 3)
__UpperCamelCase = self.module_class(config=A_,dtype=A_,**A_ )
super().__init__(A_,A_,input_shape=A_,seed=A_,dtype=A_,_do_init=_do_init )
def snake_case_ ( self: Dict,A_: jax.random.KeyArray,A_: Tuple,A_: FrozenDict = None ):
'''simple docstring'''
__UpperCamelCase = jax.random.normal(A_,A_ )
__UpperCamelCase, __UpperCamelCase = jax.random.split(A_ )
__UpperCamelCase = {'params': params_rng, 'dropout': dropout_rng}
__UpperCamelCase = self.module.init(A_,A_ )['params']
return random_params
def __call__( self: Tuple,A_: Any,A_: dict = None,):
'''simple docstring'''
__UpperCamelCase = jnp.transpose(A_,(0, 2, 3, 1) )
return self.module.apply(
{'params': params or self.params},jnp.array(A_,dtype=jnp.floataa ),rngs={},)
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
import math
def proth(number: int) -> int:
    """Calculate the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...).

    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)
    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # The (number - 1)-th entry lies within the first
        # int(log2(number // 3)) + 2 doubling blocks.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
__UpperCamelCase = 'lower'
__UpperCamelCase = ['low', 'er</w>']
__UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokens + ['<unk>']
__UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )
@slow
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
def __init__( self: Tuple,A_: List[Any],A_: str=7,A_: Any=3,A_: Tuple=18,A_: Any=30,A_: Union[str, Any]=400,A_: List[str]=True,A_: Optional[int]=None,A_: Optional[int]=True,A_: int=[0.5, 0.5, 0.5],A_: List[Any]=[0.5, 0.5, 0.5],):
'''simple docstring'''
__UpperCamelCase = size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = num_channels
__UpperCamelCase = image_size
__UpperCamelCase = min_resolution
__UpperCamelCase = max_resolution
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean
__UpperCamelCase = image_std
def snake_case_ ( self: List[str] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = DPTImageProcessingTester(self )
@property
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_,'image_mean' ) )
self.assertTrue(hasattr(A_,'image_std' ) )
self.assertTrue(hasattr(A_,'do_normalize' ) )
self.assertTrue(hasattr(A_,'do_resize' ) )
self.assertTrue(hasattr(A_,'size' ) )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{'height': 18, 'width': 18} )
__UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict,size=42 )
self.assertEqual(image_processor.size,{'height': 42, 'width': 42} )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_,Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_,np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester,equal_resolution=A_,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_,torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
# Test batched
__UpperCamelCase = image_processing(A_,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
),)
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
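# Hedged usage sketch (not part of the original file): the classes above
# only swap the config class in; loading works exactly as for T5, e.g.
#   model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")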
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`].
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
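A minimal usage sketch (not part of the original module; the checkpoint names below are assumptions for illustration): composing a RAG configuration from a question-encoder config and a generator config. Upstream, the classmethod defined above is exposed as `RagConfig.from_question_encoder_generator_configs`.

from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config,
    generator_config,
    n_docs=5,                 # documents retrieved per query
    max_combined_length=300,  # cap on contextualized (doc + question) input length
)
assert rag_config.generator.model_type == "bart"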
| 310
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__snake_case = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
__snake_case = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
__snake_case = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase (datasets.Metric ):
def snake_case_ ( self: str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string',id='token' ),id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string',id='token' ),id='sequence' ),id='references' ),
} ),)
def snake_case_ ( self: Tuple,A_: List[List[List[str]]],A_: List[List[str]],A_: int = 1,A_: int = 4,):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=A_,hypotheses=A_,min_len=A_,max_len=A_ )
}
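For intuition, a hedged from-scratch sketch of the per-sentence GLEU computation that the description above outlines (clipped n-gram matches, then the minimum of precision and recall). The metric itself delegates to `nltk.translate.gleu_score`, so this is illustrative only.

from collections import Counter

def _ngram_counts(tokens, min_len=1, max_len=4):
    # All n-grams of orders min_len..max_len, with multiplicity.
    counts = Counter()
    for n in range(min_len, max_len + 1):
        counts.update(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))
    return counts

def sentence_gleu(reference, hypothesis, min_len=1, max_len=4):
    ref = _ngram_counts(reference, min_len, max_len)
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    matches = sum((ref & hyp).values())  # clipped n-gram overlap
    # min(precision, recall) equals matches / max(total hyp n-grams, total ref n-grams)
    denom = max(sum(ref.values()), sum(hyp.values()))
    return matches / denom if denom else 0.0

print(round(sentence_gleu("the cat sat".split(), "the cat sat on".split()), 2))  # -> 0.6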
| 310
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
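# Masked mean pooling: zero out padded positions via the attention mask, then divide by each sequence's true token count.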
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
| 310
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
__UpperCamelCase = DetaConfig(
backbone_config=_lowercase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=_lowercase , with_box_refine=_lowercase , two_stage=_lowercase , )
# set labels
__UpperCamelCase = 'huggingface/label-files'
if "o365" in model_name:
__UpperCamelCase = 3_66
__UpperCamelCase = 'object365-id2label.json'
else:
__UpperCamelCase = 91
__UpperCamelCase = 'coco-detection-id2label.json'
__UpperCamelCase = num_labels
__UpperCamelCase = json.load(open(cached_download(hf_hub_url(_lowercase , _lowercase , repo_type='dataset' ) ) , 'r' ) )
__UpperCamelCase = {int(_lowercase ): v for k, v in idalabel.items()}
__UpperCamelCase = idalabel
__UpperCamelCase = {v: k for k, v in idalabel.items()}
return config
def _A ( _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def _A ( _lowercase , _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = dct.pop(_lowercase )
__UpperCamelCase = val
def _A ( _lowercase , _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__UpperCamelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__UpperCamelCase = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
__UpperCamelCase = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase = in_proj_weight[:dim, :]
__UpperCamelCase = in_proj_bias[: dim]
__UpperCamelCase = in_proj_weight[
dim : dim * 2, :
]
__UpperCamelCase = in_proj_bias[
dim : dim * 2
]
__UpperCamelCase = in_proj_weight[
-dim :, :
]
__UpperCamelCase = in_proj_bias[-dim :]
# fmt: on
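# Shape note (added for clarity): a fused qkv weight of shape (3*dim, dim) splits row-wise into
# query [0:dim), key [dim:2*dim), and value [2*dim:3*dim); the fused bias splits the same way.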
def _A ( _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__UpperCamelCase = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
__UpperCamelCase = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase = in_proj_weight[:hidden_size, :]
__UpperCamelCase = in_proj_bias[:hidden_size]
__UpperCamelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__UpperCamelCase = in_proj_bias[hidden_size : hidden_size * 2]
__UpperCamelCase = in_proj_weight[-hidden_size:, :]
__UpperCamelCase = in_proj_bias[-hidden_size:]
def _A ( ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = get_deta_config(_lowercase )
# load original state dict
if model_name == "deta-swin-large":
__UpperCamelCase = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
__UpperCamelCase = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
__UpperCamelCase = torch.load(_lowercase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(_lowercase , param.shape )
# rename keys
__UpperCamelCase = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
read_in_swin_q_k_v(_lowercase , config.backbone_config )
read_in_decoder_q_k_v(_lowercase , _lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__UpperCamelCase = state_dict.pop(_lowercase )
__UpperCamelCase = val
if "input_proj" in key:
__UpperCamelCase = state_dict.pop(_lowercase )
__UpperCamelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__UpperCamelCase = state_dict.pop(_lowercase )
__UpperCamelCase = val
# finally, create HuggingFace model and load state dict
__UpperCamelCase = DetaForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
__UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(_lowercase )
# load image processor
__UpperCamelCase = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
__UpperCamelCase = prepare_img()
__UpperCamelCase = processor(images=_lowercase , return_tensors='pt' )
__UpperCamelCase = encoding['pixel_values']
__UpperCamelCase = model(pixel_values.to(_lowercase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__UpperCamelCase = torch.tensor(
[[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
__UpperCamelCase = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
elif model_name == "deta-swin-large-o365":
__UpperCamelCase = torch.tensor(
[[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
__UpperCamelCase = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_lowercase ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_lowercase ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 310
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
),
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
| 310
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
__snake_case = {
'''unc-nlp/lxmert-base-uncased''': 5_1_2,
}
__snake_case = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_INIT_CONFIGURATION
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = LxmertTokenizer
def __init__( self: Any,A_: Optional[Any]=None,A_: int=None,A_: int=True,A_: Optional[Any]="[UNK]",A_: List[Any]="[SEP]",A_: str="[PAD]",A_: Optional[Any]="[CLS]",A_: Optional[int]="[MASK]",A_: List[str]=True,A_: Tuple=None,**A_: Union[str, Any],):
'''simple docstring'''
super().__init__(
A_,tokenizer_file=A_,do_lower_case=A_,unk_token=A_,sep_token=A_,pad_token=A_,cls_token=A_,mask_token=A_,tokenize_chinese_chars=A_,strip_accents=A_,**A_,)
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
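# If the serialized normalizer state disagrees with the init arguments (lowercasing, accent stripping,
# Chinese-character handling), rebuild the normalizer below so the two stay consistent.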
if (
normalizer_state.get('lowercase',A_ ) != do_lower_case
or normalizer_state.get('strip_accents',A_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars',A_ ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(A_,normalizer_state.pop('type' ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**A_ )
__UpperCamelCase = do_lower_case
def snake_case_ ( self: Dict,A_: List[Any],A_: Optional[int]=None ):
'''simple docstring'''
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self: str,A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self: int,A_: str,A_: Optional[str] = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(A_,name=A_ )
return tuple(A_ )
| 310
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
| 310
| 1