'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'instructblip_vision_model'
def __init__( self , UpperCamelCase_=1408 , UpperCamelCase_=6144 , UpperCamelCase_=39 , UpperCamelCase_=16 , UpperCamelCase_=224 , UpperCamelCase_=14 , UpperCamelCase_="gelu" , UpperCamelCase_=1e-6 , UpperCamelCase_=0.0 , UpperCamelCase_=1e-10 , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = hidden_size
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :List[Any] = patch_size
UpperCamelCase__ :Tuple = image_size
UpperCamelCase__ :List[Any] = initializer_range
UpperCamelCase__ :int = attention_dropout
UpperCamelCase__ :Union[str, Any] = layer_norm_eps
UpperCamelCase__ :List[Any] = hidden_act
UpperCamelCase__ :str = qkv_bias
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
UpperCamelCase__ :List[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowercase ( A__ ):
"""simple docstring"""
_a = 'instructblip_qformer'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=0 , UpperCamelCase_="absolute" , UpperCamelCase_=2 , UpperCamelCase_=1408 , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[str] = num_hidden_layers
UpperCamelCase__ :Any = num_attention_heads
UpperCamelCase__ :List[str] = hidden_act
UpperCamelCase__ :List[str] = intermediate_size
UpperCamelCase__ :str = hidden_dropout_prob
UpperCamelCase__ :Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :List[Any] = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :Dict = position_embedding_type
UpperCamelCase__ :int = cross_attention_frequency
UpperCamelCase__ :str = encoder_hidden_size
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Dict = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
UpperCamelCase__ :str = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowercase ( A__ ):
"""simple docstring"""
_a = 'instructblip'
_a = True
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=32 , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
UpperCamelCase__ :int = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
UpperCamelCase__ :Union[str, Any] = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
UpperCamelCase__ :str = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
UpperCamelCase__ :Union[str, Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
UpperCamelCase__ :str = InstructBlipQFormerConfig(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
UpperCamelCase__ :Union[str, Any] = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.text_config.tie_word_embeddings
UpperCamelCase__ :int = self.text_config.is_encoder_decoder
UpperCamelCase__ :Union[str, Any] = num_query_tokens
UpperCamelCase__ :Optional[Any] = self.vision_config.hidden_size
UpperCamelCase__ :Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCamelCase__ :str = 1.0
UpperCamelCase__ :List[str] = 0.02
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ , ):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = copy.deepcopy(self.__dict__ )
UpperCamelCase__ :List[str] = self.vision_config.to_dict()
UpperCamelCase__ :Tuple = self.qformer_config.to_dict()
UpperCamelCase__ :List[Any] = self.text_config.to_dict()
UpperCamelCase__ :Dict = self.__class__.model_type
return output
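# A minimal usage sketch for the composite-config pattern implemented above. It assumes a
# transformers release that ships the InstructBlip classes; the class and method names below
# come from that library rather than from this file.
from transformers import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, OPTConfig

vision_config = InstructBlipVisionConfig()    # default vision tower settings
qformer_config = InstructBlipQFormerConfig()  # default Q-Former settings
text_config = OPTConfig()                     # the text backbone defaults to OPT, as in the code above
config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)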
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't assign this task in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
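# Worked check for the driver above: with 5 total tasks and the three persons able to do
# tasks [1, 3, 4], [1, 2, 5] and [3, 4] respectively, there are 10 assignments that give
# every person a distinct task, so the printed answer should be 10.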
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__snake_case = '''\
'''
__snake_case = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
__snake_case = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = True , UpperCamelCase_=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
UpperCamelCase__ :Tuple = '''cuda'''
else:
UpperCamelCase__ :Dict = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCamelCase__ :Tuple = AutoModelForCausalLM.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :List[str] = model.to(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCamelCase__ :Any = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(UpperCamelCase_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCamelCase__ :str = model.config.max_length - 1
else:
UpperCamelCase__ :List[str] = model.config.max_length
UpperCamelCase__ :List[str] = tokenizer(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='''pt''' , return_attention_mask=UpperCamelCase_ , ).to(UpperCamelCase_ )
UpperCamelCase__ :Tuple = encodings['''input_ids''']
UpperCamelCase__ :str = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCamelCase__ :str = []
UpperCamelCase__ :Any = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ ) ):
UpperCamelCase__ :int = min(start_index + batch_size , len(UpperCamelCase_ ) )
UpperCamelCase__ :Tuple = encoded_texts[start_index:end_index]
UpperCamelCase__ :Any = attn_masks[start_index:end_index]
if add_start_token:
UpperCamelCase__ :List[str] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCamelCase__ :Dict = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(UpperCamelCase_ ), attn_mask] , dim=1 )
UpperCamelCase__ :Optional[Any] = encoded_batch
with torch.no_grad():
UpperCamelCase__ :Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ).logits
UpperCamelCase__ :Optional[int] = out_logits[..., :-1, :].contiguous()
UpperCamelCase__ :List[Any] = labels[..., 1:].contiguous()
UpperCamelCase__ :int = attn_mask[..., 1:].contiguous()
UpperCamelCase__ :List[str] = torch.exp2(
(loss_fct(shift_logits.transpose(1 , 2 ) , UpperCamelCase_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCamelCase_ )} | 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__snake_case = logging.get_logger(__name__)
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **UpperCamelCase_ ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCamelCase__ :int = deprecated_arg[3:]
UpperCamelCase__ :int = not kwargs.pop(UpperCamelCase_ )
logger.warning(
F'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
UpperCamelCase__ :int = kwargs.pop('''tpu_name''' , self.tpu_name )
UpperCamelCase__ :List[str] = kwargs.pop('''device_idx''' , self.device_idx )
UpperCamelCase__ :List[str] = kwargs.pop('''eager_mode''' , self.eager_mode )
UpperCamelCase__ :Dict = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**UpperCamelCase_ )
_a = field(
default=A__ , metadata={'help': 'Name of TPU'} , )
_a = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
_a = field(default=A__ , metadata={'help': 'Benchmark models in eager mode.'} )
_a = field(
default=A__ , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'
} , )
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
UpperCamelCase__ :List[str] = None
if self.tpu:
try:
if self.tpu_name:
UpperCamelCase__ :Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
UpperCamelCase__ :Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
UpperCamelCase__ :Dict = None
return tpu
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
UpperCamelCase__ :Any = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
UpperCamelCase__ :Optional[Any] = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
UpperCamelCase__ :List[str] = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.n_gpu > 0
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
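# Example invocation (the script filename is assumed for illustration):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection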
'''simple docstring'''
def a ( __a = 4000000 ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = []
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__a )
UpperCamelCase__ , UpperCamelCase__ :List[Any] = b, a + b
return sum(__a )
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
'''simple docstring'''
def a ( __a , __a ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(__a , x % y )
def a ( __a , __a ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(__a , __a )
def a ( __a = 20 ) -> int:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = 1
for i in range(1 , n + 1 ):
UpperCamelCase__ :Tuple = lcm(__a , __a )
return g
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 |
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
UpperCamelCase__ :Union[str, Any] = ''''''.join(__a )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i: i.created_at , reverse=True )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
'''simple docstring'''
from PIL import Image
def a ( __a , __a ) -> Image:
'''simple docstring'''
def brightness(__a ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__a )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
__snake_case.save('''image_data/lena_brightness.png''', format='''png''')
'''simple docstring'''
def a ( __a , __a ) -> str:
'''simple docstring'''
if not isinstance(__a , __a ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__a , __a ) or not number >= 1:
raise ValueError(
'''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
UpperCamelCase__ :Optional[int] = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__a )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__snake_case = None
try:
import msvcrt
except ImportError:
__snake_case = None
try:
import fcntl
except ImportError:
__snake_case = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__snake_case = OSError
# Data
# ------------------------------------------------
__snake_case = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__snake_case = '''3.0.12'''
__snake_case = None
def a ( ) -> List[str]:
'''simple docstring'''
global _logger
UpperCamelCase__ :Any = _logger or logging.getLogger(__name__ )
return _logger
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = lock_file
return None
def __str__( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = F'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = lock
return None
def __enter__( self ):
'''simple docstring'''
return self.lock
def __exit__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
self.lock.release()
return None
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=-1 , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[str] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
UpperCamelCase__ :Tuple = self.hash_filename_if_too_long(UpperCamelCase_ , UpperCamelCase_ )
# The path to the lock file.
UpperCamelCase__ :List[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
UpperCamelCase__ :Union[str, Any] = None
# The default timeout value.
UpperCamelCase__ :Tuple = timeout
# We use this lock primarily for the lock counter.
UpperCamelCase__ :List[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
UpperCamelCase__ :Union[str, Any] = 0
return None
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self._lock_file
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self._timeout
@timeout.setter
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = float(UpperCamelCase_ )
return None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
raise NotImplementedError()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
raise NotImplementedError()
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self._lock_file_fd is not None
def lowerCAmelCase__ ( self , UpperCamelCase_=None , UpperCamelCase_=0.05 ):
'''simple docstring'''
if timeout is None:
UpperCamelCase__ :Optional[Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
UpperCamelCase__ :Optional[Any] = id(self )
UpperCamelCase__ :Any = self._lock_file
UpperCamelCase__ :Optional[Any] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(UpperCamelCase_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
UpperCamelCase__ :Tuple = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase__ ( self , UpperCamelCase_=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
UpperCamelCase__ :Dict = id(self )
UpperCamelCase__ :Optional[int] = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
UpperCamelCase__ :Any = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self ):
'''simple docstring'''
self.acquire()
return self
def __exit__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
self.release()
return None
def __del__( self ):
'''simple docstring'''
self.release(force=UpperCamelCase_ )
return None
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = os.path.basename(UpperCamelCase_ )
if len(UpperCamelCase_ ) > max_length and max_length > 0:
UpperCamelCase__ :List[Any] = os.path.dirname(UpperCamelCase_ )
UpperCamelCase__ :str = str(hash(UpperCamelCase_ ) )
UpperCamelCase__ :List[str] = filename[: max_length - len(UpperCamelCase_ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(UpperCamelCase_ , UpperCamelCase_ )
else:
return path
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=-1 , UpperCamelCase_=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(UpperCamelCase_ , timeout=UpperCamelCase_ , max_filename_length=UpperCamelCase_ )
UpperCamelCase__ :Any = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
UpperCamelCase__ :Optional[int] = os.open(self._lock_file , UpperCamelCase_ )
except OSError:
pass
else:
try:
msvcrt.locking(UpperCamelCase_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(UpperCamelCase_ )
else:
UpperCamelCase__ :int = fd
return None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self._lock_file_fd
UpperCamelCase__ :Tuple = None
msvcrt.locking(UpperCamelCase_ , msvcrt.LK_UNLCK , 1 )
os.close(UpperCamelCase_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=-1 , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = os.statvfs(os.path.dirname(UpperCamelCase_ ) ).f_namemax
super().__init__(UpperCamelCase_ , timeout=UpperCamelCase_ , max_filename_length=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
UpperCamelCase__ :Union[str, Any] = os.open(self._lock_file , UpperCamelCase_ )
try:
fcntl.flock(UpperCamelCase_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(UpperCamelCase_ )
else:
UpperCamelCase__ :Optional[int] = fd
return None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self._lock_file_fd
UpperCamelCase__ :str = None
fcntl.flock(UpperCamelCase_ , fcntl.LOCK_UN )
os.close(UpperCamelCase_ )
return None
class lowercase ( A__ ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
UpperCamelCase__ :Tuple = os.open(self._lock_file , UpperCamelCase_ )
except OSError:
pass
else:
UpperCamelCase__ :int = fd
return None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
os.close(self._lock_file_fd )
UpperCamelCase__ :List[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__snake_case = None
if msvcrt:
__snake_case = WindowsFileLock
elif fcntl:
__snake_case = UnixFileLock
else:
__snake_case = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
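# A minimal usage sketch for the FileLock alias selected above (the lock-file path is
# illustrative; Timeout is the exception raised by acquire() above when the wait exceeds
# the configured timeout).
example_lock = FileLock('''/tmp/example.txt.lock''', timeout=5)
try:
    with example_lock:
        pass  # critical section: the lock file is held while this block runs
except Timeout:
    print('''another process currently holds the lock''')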
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = [10, 20, 30, 40, 50, 60]
UpperCamelCase__ :Optional[int] = [2, 4, 6, 8, 10, 12]
UpperCamelCase__ :Dict = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , '''max_weight must greater than zero.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , '''Weight can not be negative.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , '''Profit can not be negative.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , '''max_weight must greater than zero.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
'''simple docstring'''
import torch
from torch import nn
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=1 , UpperCamelCase_=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Union[str, Any] = n_token
UpperCamelCase__ :Union[str, Any] = d_embed
UpperCamelCase__ :List[Any] = d_proj
UpperCamelCase__ :Optional[Any] = cutoffs + [n_token]
UpperCamelCase__ :Any = [0] + self.cutoffs
UpperCamelCase__ :Any = div_val
UpperCamelCase__ :Optional[int] = self.cutoffs[0]
UpperCamelCase__ :List[Any] = len(self.cutoffs ) - 1
UpperCamelCase__ :Dict = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCamelCase__ :int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCamelCase__ :Union[str, Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCamelCase__ :Any = nn.ModuleList()
UpperCamelCase__ :Optional[int] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase_ , UpperCamelCase_ ) ) )
else:
self.out_projs.append(UpperCamelCase_ )
self.out_layers.append(nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase__ , UpperCamelCase__ :str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :Union[str, Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase_ , UpperCamelCase_ ) ) )
self.out_layers.append(nn.Linear(UpperCamelCase_ , r_idx - l_idx ) )
UpperCamelCase__ :Optional[int] = keep_order
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if proj is None:
UpperCamelCase__ :int = nn.functional.linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCamelCase__ :int = nn.functional.linear(UpperCamelCase_ , proj.t().contiguous() )
UpperCamelCase__ :Any = nn.functional.linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
UpperCamelCase__ :int = hidden[..., :-1, :].contiguous()
UpperCamelCase__ :int = labels[..., 1:].contiguous()
UpperCamelCase__ :Dict = hidden.view(-1 , hidden.size(-1 ) )
UpperCamelCase__ :List[Any] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
UpperCamelCase__ :List[str] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCamelCase__ :List[Any] = self._compute_logit(UpperCamelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCamelCase__ :Tuple = labels != -100
UpperCamelCase__ :List[str] = torch.zeros_like(UpperCamelCase_ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase__ :List[str] = (
-nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCamelCase__ :Dict = nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase__ , UpperCamelCase__ :int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase__ , UpperCamelCase__ :Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase__ :Tuple = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase__ :List[Any] = self.out_layers[i].weight
UpperCamelCase__ :Tuple = self.out_layers[i].bias
if i == 0:
UpperCamelCase__ :List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase__ :List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase_ )
biases.append(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = weights[0], biases[0], self.out_projs[0]
UpperCamelCase__ :List[Any] = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
if labels is None:
UpperCamelCase__ :Dict = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCamelCase__ :Optional[Any] = torch.zeros_like(UpperCamelCase_ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase__ :Any = 0
UpperCamelCase__ :List[str] = [0] + self.cutoffs
for i in range(len(UpperCamelCase_ ) - 1 ):
UpperCamelCase__ , UpperCamelCase__ :Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCamelCase__ :List[Any] = (labels >= l_idx) & (labels < r_idx)
UpperCamelCase__ :List[Any] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCamelCase__ :Optional[Any] = labels.index_select(0 , UpperCamelCase_ ) - l_idx
UpperCamelCase__ :List[str] = head_logprob.index_select(0 , UpperCamelCase_ )
UpperCamelCase__ :Tuple = hidden.index_select(0 , UpperCamelCase_ )
else:
UpperCamelCase__ :Any = hidden
if i == 0:
if labels is not None:
UpperCamelCase__ :str = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase__ :List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = weights[i], biases[i], self.out_projs[i]
UpperCamelCase__ :Optional[Any] = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Any = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :int = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCamelCase__ :List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase__ :Any = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCamelCase__ :List[str] = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCamelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.n_clusters == 0:
UpperCamelCase__ :int = self._compute_logit(UpperCamelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase__ , UpperCamelCase__ :List[str] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase__ , UpperCamelCase__ :int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase__ :Any = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase__ :int = self.out_layers[i].weight
UpperCamelCase__ :Tuple = self.out_layers[i].bias
if i == 0:
UpperCamelCase__ :Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase__ :str = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase_ )
biases.append(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = weights[0], biases[0], self.out_projs[0]
UpperCamelCase__ :int = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :str = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCamelCase__ :Any = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :Optional[Any] = [0] + self.cutoffs
for i in range(len(UpperCamelCase_ ) - 1 ):
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCamelCase__ :str = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = weights[i], biases[i], self.out_projs[i]
UpperCamelCase__ :Dict = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Tuple = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :str = head_logprob[:, -i] + tail_logprob_i
UpperCamelCase__ :str = logprob_i
return out | 97 |
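# Minimal sketch of the adaptive-softmax decomposition implemented above: frequent
# ("head") tokens are scored directly, while a rare token in cluster c is scored as
# log p(c | h) + log p(token | c, h). All names below are illustrative only.
import torch
import torch.nn.functional as F

def tail_log_probs(head_logits: torch.Tensor, tail_logits: torch.Tensor, cluster_idx: int) -> torch.Tensor:
    # head_logits: (batch, shortlist_size + n_clusters); tail_logits: (batch, tail_vocab)
    head_logprob = F.log_softmax(head_logits, dim=-1)
    tail_logprob = F.log_softmax(tail_logits, dim=-1)
    # log p(word) = log p(cluster) + log p(word | cluster), as in the cluster loop above
    return head_logprob[:, cluster_idx, None] + tail_logprob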
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
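# With the lazy import structure above, the heavy DPT submodules are only loaded when
# first accessed. A typical (assumed) downstream usage would be:
#
#   from transformers import DPTImageProcessor, DPTForDepthEstimation
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
#
# "Intel/dpt-large" is a commonly used checkpoint; any DPT checkpoint works the same way.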
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
__snake_case = get_logger(__name__)
__snake_case = Path(__file__).parent / '''model_card_template.md'''
__snake_case = uuida().hex
__snake_case = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
__snake_case = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
__snake_case = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def a ( __a = None ) -> str:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a , __a ):
ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
return ua
def a ( __a , __a = None , __a = None ) -> Optional[Any]:
'''simple docstring'''
if token is None:
UpperCamelCase__ :Optional[int] = HfFolder.get_token()
if organization is None:
UpperCamelCase__ :Optional[Any] = whoami(__a )['''name''']
return f'''{username}/{model_id}'''
else:
return f'''{organization}/{model_id}'''
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(__a , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
UpperCamelCase__ :Any = args.hub_token if hasattr(__a , '''hub_token''' ) else None
UpperCamelCase__ :List[Any] = get_full_repo_name(__a , token=__a )
UpperCamelCase__ :Union[str, Any] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__a , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(__a , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(__a , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
UpperCamelCase__ :Union[str, Any] = os.path.join(args.output_dir , '''README.md''' )
model_card.save(__a )
def a ( __a , __a = None ) -> int:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCamelCase__ :str = str(Path(__a ).as_posix() )
UpperCamelCase__ :Union[str, Any] = re.search(R'''snapshots/([^/]+)/''' , __a )
if search is None:
return None
UpperCamelCase__ :int = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
__snake_case = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
__snake_case = os.path.join(hf_cache_home, '''diffusers''')
def a ( __a = None , __a = None ) -> None:
'''simple docstring'''
if new_cache_dir is None:
UpperCamelCase__ :str = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCamelCase__ :Tuple = old_diffusers_cache
UpperCamelCase__ :Optional[int] = Path(__a ).expanduser()
UpperCamelCase__ :Dict = Path(__a ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCamelCase__ :Tuple = new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a , exist_ok=__a )
os.replace(__a , __a )
try:
os.symlink(__a , __a )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
__snake_case = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
__snake_case = 0
else:
with open(cache_version_file) as f:
try:
__snake_case = int(f.read())
except ValueError:
__snake_case = 0
if cache_version < 1:
__snake_case = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
__snake_case = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def a ( __a , __a = None ) -> str:
'''simple docstring'''
if variant is not None:
UpperCamelCase__ :List[str] = weights_name.split('''.''' )
UpperCamelCase__ :Any = splits[:-1] + [variant] + splits[-1:]
UpperCamelCase__ :Dict = '''.'''.join(__a )
return weights_name
def a ( __a , *,
__a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a=None , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :str = str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
UpperCamelCase__ :str = os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
UpperCamelCase__ :Optional[Any] = os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('''0.20.0''' )
):
try:
UpperCamelCase__ :str = hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}\' so that the correct variant file can be added.''' , __a , )
try:
# 2. Load model file as usual
UpperCamelCase__ :Optional[int] = hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' ) | 97 |
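# Worked example of the variant naming handled by the helper above: the weights name is
# split on "." and the variant is re-inserted before the extension, so (illustratively)
#
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   -> "diffusion_pytorch_model.fp16.bin"
#
# and with variant=None the original name is returned unchanged.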
'''simple docstring'''
def a ( __a , __a ) -> int:
'''simple docstring'''
if len(__a ) != len(__a ):
raise ValueError('''String lengths must match!''' )
UpperCamelCase__ :Union[str, Any] = 0
for chara, charb in zip(__a , __a ):
if chara != charb:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
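# Self-contained sketch of the intended Hamming-distance behaviour (kept independent
# of the helper above so it is runnable on its own):
def hamming_distance(a: str, b: str) -> int:
    if len(a) != len(b):
        raise ValueError("String lengths must match!")
    return sum(x != y for x, y in zip(a, b))

assert hamming_distance("karolin", "kathrin") == 3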
'''simple docstring'''
import re
def a ( __a ) -> str:
'''simple docstring'''
if len(re.findall('''[ATCG]''' , __a ) ) != len(__a ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
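# Self-contained sketch of the intended A<->T / C<->G complement mapping (illustrative only):
_COMPLEMENT = str.maketrans("ATCG", "TAGC")

def complement(dna: str) -> str:
    return dna.translate(_COMPLEMENT)

assert complement("AAACGGA") == "TTTGCCT"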
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j) * C(i-j-1)), for j from 0 to i-1
for i in range(2 , upper_limit + 1 ):
for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
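# Compact cross-check of the recurrence C(0) = 1, C(i) = sum_{j=0..i-1} C(j) * C(i-1-j),
# written as an independent sketch rather than a call into the code above:
def catalan(n: int) -> list:
    c = [1] + [0] * n
    for i in range(1, n + 1):
        c[i] = sum(c[j] * c[i - 1 - j] for j in range(i))
    return c

assert catalan(5) == [1, 1, 2, 5, 14, 42]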
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
self.check_model_type(UpperCamelCase_ )
def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
if "text_queries" in kwargs:
UpperCamelCase__ :str = kwargs.pop('''text_queries''' )
if isinstance(UpperCamelCase_ , (str, Image.Image) ):
UpperCamelCase__ :List[Any] = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
UpperCamelCase__ :Any = image
UpperCamelCase__ :Optional[int] = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
return results
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = {}
if "threshold" in kwargs:
UpperCamelCase__ :List[Any] = kwargs['''threshold''']
if "top_k" in kwargs:
UpperCamelCase__ :Optional[Any] = kwargs['''top_k''']
return {}, {}, postprocess_params
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = load_image(inputs['''image'''] )
UpperCamelCase__ :Dict = inputs['''candidate_labels''']
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Optional[Any] = candidate_labels.split(''',''' )
UpperCamelCase__ :Union[str, Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCamelCase_ ):
UpperCamelCase__ :List[str] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
UpperCamelCase__ :Optional[int] = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = model_inputs.pop('''target_size''' )
UpperCamelCase__ :Optional[int] = model_inputs.pop('''candidate_label''' )
UpperCamelCase__ :List[Any] = model_inputs.pop('''is_last''' )
UpperCamelCase__ :str = self.model(**UpperCamelCase_ )
UpperCamelCase__ :str = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0.1 , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :str = []
for model_output in model_outputs:
UpperCamelCase__ :Optional[int] = model_output['''candidate_label''']
UpperCamelCase__ :Union[str, Any] = BaseModelOutput(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
UpperCamelCase__ :Dict = outputs['''scores'''][index].item()
UpperCamelCase__ :int = self._get_bounding_box(outputs['''boxes'''][index][0] )
UpperCamelCase__ :Optional[int] = {'''score''': score, '''label''': label, '''box''': box}
results.append(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ )
if top_k:
UpperCamelCase__ :Tuple = results[:top_k]
return results
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = box.int().tolist()
UpperCamelCase__ :Union[str, Any] = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox | 97 |
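# Typical end-user invocation of this pipeline via the high-level API (the checkpoint
# name is an assumption; any OWL-ViT-style zero-shot detection model works the same way):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   preds = detector("street.jpg", candidate_labels=["a person", "a bicycle", "a car"])
#   # -> list of {"score": ..., "label": ..., "box": {"xmin", "ymin", "xmax", "ymax"}}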
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
return [json.loads(__a ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
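# The reader/writer pair exercised above backs the public datasets JSON API.
# A minimal (assumed) round trip matching what these tests cover:
#
#   from datasets import Dataset, load_dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
#   ds.to_json("data.jsonl", lines=True)                      # JsonDatasetWriter under the hood
#   reloaded = load_dataset("json", data_files="data.jsonl")  # JsonDatasetReader under the hood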
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase :
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ :int = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase__ :int = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase__ :Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase__ :Tuple = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCamelCase__ :str = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.get_dummy_components()
UpperCamelCase__ :List[Any] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :int = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :List[str] = inputs['''prompt''']
UpperCamelCase__ :Tuple = inputs['''generator''']
UpperCamelCase__ :Optional[Any] = inputs['''num_inference_steps''']
UpperCamelCase__ :List[str] = inputs['''output_type''']
if "image" in inputs:
UpperCamelCase__ :Optional[int] = inputs['''image''']
else:
UpperCamelCase__ :Optional[Any] = None
if "mask_image" in inputs:
UpperCamelCase__ :List[Any] = inputs['''mask_image''']
else:
UpperCamelCase__ :Union[str, Any] = None
if "original_image" in inputs:
UpperCamelCase__ :Optional[Any] = inputs['''original_image''']
else:
UpperCamelCase__ :Optional[Any] = None
UpperCamelCase__ , UpperCamelCase__ :Tuple = pipe.encode_prompt(UpperCamelCase_ )
# inputs with prompt converted to embeddings
UpperCamelCase__ :List[str] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase__ :List[str] = image
if mask_image is not None:
UpperCamelCase__ :List[Any] = mask_image
if original_image is not None:
UpperCamelCase__ :Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = pipe(**UpperCamelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Dict = self.pipeline_class.from_pretrained(UpperCamelCase_ )
pipe_loaded.to(UpperCamelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase_ , UpperCamelCase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCamelCase__ :List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :str = inputs['''generator''']
UpperCamelCase__ :Optional[int] = inputs['''num_inference_steps''']
UpperCamelCase__ :Any = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCamelCase__ :List[str] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase__ :Dict = image
if mask_image is not None:
UpperCamelCase__ :int = mask_image
if original_image is not None:
UpperCamelCase__ :Optional[int] = original_image
UpperCamelCase__ :str = pipe_loaded(**UpperCamelCase_ )[0]
UpperCamelCase__ :Optional[Any] = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max()
self.assertLess(UpperCamelCase_ , 1e-4 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.get_dummy_components()
UpperCamelCase__ :List[str] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = pipe(**UpperCamelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.pipeline_class.from_pretrained(UpperCamelCase_ )
pipe_loaded.to(UpperCamelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCamelCase__ :Tuple = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :int = pipe_loaded(**UpperCamelCase_ )[0]
UpperCamelCase__ :Tuple = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max()
self.assertLess(UpperCamelCase_ , 1e-4 ) | 97 |
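# Both tests above follow the same save/reload pattern: generate once, serialize with
# save_pretrained, reload, generate again with identical inputs, and require the two
# outputs to agree numerically. The core check, in isolation (illustrative names):
#
#   max_diff = np.abs(to_np(output_before) - to_np(output_after)).max()
#   assert max_diff < 1e-4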
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 1 |
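# Inferred from the assertions above (not from the implementation itself),
# _convert_nargs_to_dict turns a flat argparse-style list into a typed dict, roughly:
#
#   _convert_nargs_to_dict(["--model_name_or_path", "bert", "--do_train", "False",
#                           "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5"])
#   -> {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#       "learning_rate": 5e-5, "max_steps": 50.5}
#
# while the "fail" list, which mixes bare flags with valued ones, is expected to raise.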
'''simple docstring'''
__snake_case = 65521
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
return (b << 16) | a | 97 |
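# Worked Adler-32 example (a = 1 + running byte sum, b = running sum of a, both mod 65521,
# result packed as (b << 16) | a), written as a self-contained sketch:
def adler32(data: str) -> int:
    a, b = 1, 0
    for ch in data:
        a = (a + ord(ch)) % 65521
        b = (b + a) % 65521
    return (b << 16) | a

assert adler32("Wikipedia") == 300286872  # 0x11E60398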
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> int:
'''simple docstring'''
for param in module.parameters():
UpperCamelCase__ :Dict = False
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase__ :Optional[int] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Dict = plt.imshow(__a )
fig.axes.get_xaxis().set_visible(__a )
fig.axes.get_yaxis().set_visible(__a )
plt.show()
def a ( ) -> str:
'''simple docstring'''
UpperCamelCase__ :int = datetime.now()
UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''' )
return timestamp | 97 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''ViTFeatureExtractor''']
__snake_case = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )} | 97 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=16 , UpperCamelCase_=[32, 64, 128] , UpperCamelCase_=[1, 2, 1] , UpperCamelCase_=[2, 2, 4] , UpperCamelCase_=2 , UpperCamelCase_=2.0 , UpperCamelCase_=True , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_="gelu" , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=0.02 , UpperCamelCase_=1e-5 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=10 , UpperCamelCase_=8 , UpperCamelCase_=["stage1", "stage2"] , UpperCamelCase_=[1, 2] , ):
'''simple docstring'''
UpperCamelCase__ :Tuple = parent
UpperCamelCase__ :Any = batch_size
UpperCamelCase__ :int = image_size
UpperCamelCase__ :int = patch_size
UpperCamelCase__ :int = num_channels
UpperCamelCase__ :Union[str, Any] = embed_dim
UpperCamelCase__ :int = hidden_sizes
UpperCamelCase__ :Optional[int] = depths
UpperCamelCase__ :Optional[Any] = num_heads
UpperCamelCase__ :Optional[Any] = window_size
UpperCamelCase__ :List[Any] = mlp_ratio
UpperCamelCase__ :List[Any] = qkv_bias
UpperCamelCase__ :List[Any] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :List[Any] = drop_path_rate
UpperCamelCase__ :int = hidden_act
UpperCamelCase__ :List[str] = use_absolute_embeddings
UpperCamelCase__ :Optional[int] = patch_norm
UpperCamelCase__ :int = layer_norm_eps
UpperCamelCase__ :str = initializer_range
UpperCamelCase__ :Any = is_training
UpperCamelCase__ :Optional[int] = scope
UpperCamelCase__ :Dict = use_labels
UpperCamelCase__ :List[str] = type_sequence_label_size
UpperCamelCase__ :Union[str, Any] = encoder_stride
UpperCamelCase__ :List[Any] = out_features
UpperCamelCase__ :int = out_indices
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :int = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = FocalNetModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :int = model(UpperCamelCase_ )
UpperCamelCase__ :int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase__ :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = FocalNetBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :List[Any] = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCamelCase__ :Optional[Any] = None
UpperCamelCase__ :Union[str, Any] = FocalNetBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = FocalNetForMaskedImageModeling(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase__ :Dict = 1
UpperCamelCase__ :List[Any] = FocalNetForMaskedImageModeling(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.type_sequence_label_size
UpperCamelCase__ :List[Any] = FocalNetForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ :int = 1
UpperCamelCase__ :Dict = FocalNetForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :Dict = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Any = config_and_inputs
UpperCamelCase__ :str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_a = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = FocalNetModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , embed_dim=37 , has_text_modality=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :Optional[Any] = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :Tuple = model_class(UpperCamelCase_ )
UpperCamelCase__ :int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :str = [*signature.parameters.keys()]
UpperCamelCase__ :Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase__ :Optional[int] = outputs.hidden_states
UpperCamelCase__ :int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# FocalNet has a different seq_length
UpperCamelCase__ :Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase__ :Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = reshaped_hidden_states[0].shape
UpperCamelCase__ :Union[str, Any] = (
reshaped_hidden_states[0].view(UpperCamelCase_ , UpperCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :Tuple = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also works using the config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :str = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Union[str, Any] = 3
UpperCamelCase__ :Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase__ :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__ :Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase__ :Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :List[Any] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also works using the config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :str = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :int = FocalNetModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :List[str] = _config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[Any] = model_class(config=UpperCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(UpperCamelCase_ )
UpperCamelCase__ :str = self.default_image_processor
UpperCamelCase__ :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCamelCase__ :int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
UpperCamelCase__ :Optional[int] = model(**UpperCamelCase_ )
# verify the logits
UpperCamelCase__ :List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = (FocalNetBackbone,) if is_torch_available() else ()
_a = FocalNetConfig
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = FocalNetModelTester(self ) | 97 |
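A short arithmetic sketch of the shape check in `create_and_check_model` above, plugging in the tester defaults (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]); the numbers are illustrative only.

image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # 256 // 16 = 16
expected_dim = embed_dim * 2 ** (len(depths) - 1)  # 16 * 4 = 64
print(expected_seq_len, expected_dim)  # 16 64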
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 97 | 1 |
'''simple docstring'''
__snake_case = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :List[str] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# Every number's chain eventually joins one of two chains:
# one ends in 89 (declaring its member 58 first gives the fewest iterations
# needed to classify all remaining members), and the other ends in 1 and
# contains only the single element 1.
# So 58 and 1 are the values declared at the start.
# A dictionary was replaced with an array to speed up the solution.
__snake_case = [None] * 10000000
__snake_case = True
__snake_case = False
def a ( __a ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCamelCase__ :Dict = chain(next_number(__a ) )
UpperCamelCase__ :int = number_chain
while number < 10000000:
UpperCamelCase__ :Optional[int] = number_chain
number *= 10
return number_chain
def a ( __a = 10000000 ) -> int:
'''simple docstring'''
for i in range(1 , __a ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""") | 97 |
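A minimal, unmemoized sketch of the same square-digit-chain idea, assuming a much smaller limit so it runs quickly; every chain eventually settles on 1 or 89, which is the property the cached solution above exploits.

def ends_in_89(n: int) -> bool:
    while n not in (1, 89):
        n = sum(int(digit) ** 2 for digit in str(n))
    return n == 89

print(sum(ends_in_89(i) for i in range(1, 1_000)))  # starting numbers below 1000 whose chain ends in 89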
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 |
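A small illustration of the split performed in `read_in_q_k_v` above: PyTorch's fused attention projection stacks the query, key and value weights along dimension 0, so a (3*d, d) matrix separates into three (d, d) blocks (d = 256 here, matching the slices used in the conversion).

import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)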
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset | 97 | 1 |
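A hedged usage sketch of the public entry point that this reader typically backs: `Dataset.from_generator` turns a plain generator function into a map-style dataset (the column names below are arbitrary).

from datasets import Dataset

def squares():
    for i in range(5):
        yield {"n": i, "n_squared": i * i}

ds = Dataset.from_generator(squares)
print(len(ds), ds[2])  # 5 {'n': 2, 'n_squared': 4}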
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__snake_case = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = {}
if "candidate_labels" in kwargs:
UpperCamelCase__ :int = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase__ :Dict = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_="This is a photo of {}." ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = load_image(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCamelCase__ :List[Any] = candidate_labels
UpperCamelCase__ :Optional[int] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
UpperCamelCase__ :List[Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
UpperCamelCase__ :Any = [text_inputs]
return inputs
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = model_inputs.pop('''candidate_labels''' )
UpperCamelCase__ :Any = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
UpperCamelCase__ :Dict = text_inputs[0]
else:
# Batching case.
UpperCamelCase__ :Any = text_inputs[0][0]
UpperCamelCase__ :str = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = model_outputs.pop('''candidate_labels''' )
UpperCamelCase__ :List[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase__ :Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCamelCase__ :str = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Optional[int] = [scores]
elif self.framework == "tf":
UpperCamelCase__ :Any = stable_softmax(UpperCamelCase_ , axis=-1 )
UpperCamelCase__ :Tuple = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
UpperCamelCase__ :str = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
]
return result | 97 |
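A hedged end-to-end sketch of the pipeline defined above; the checkpoint name and image URL are only examples of a CLIP-style model and a test image, not values taken from this file.

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "dog", "remote control"],
)
print(predictions)  # list of {"score": ..., "label": ...} dicts sorted by descending score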
'''simple docstring'''
__snake_case = 65521
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
return (b << 16) | a | 97 | 1 |
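The checksum above follows the standard Adler-32 definition: the running sums start at a = 1 and b = 0 and are reduced modulo 65521. A small sketch that restates it with readable names and cross-checks it against the standard library's `zlib.adler32` for ASCII input:

import zlib

MOD_ADLER = 65521

def adler32_text(plain_text: str) -> int:
    a, b = 1, 0
    for ch in plain_text:
        a = (a + ord(ch)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a

sample = "Wikipedia"
assert adler32_text(sample) == zlib.adler32(sample.encode("ascii"))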
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def a ( __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :Dict = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__a , __a )
def a ( __a ) -> str:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
UpperCamelCase__ :str = s_dict.pop(__a )
elif "subsample" in key:
UpperCamelCase__ :Dict = s_dict.pop(__a )
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = emb.weight.shape
UpperCamelCase__ :List[Any] = nn.Linear(__a , __a , bias=__a )
UpperCamelCase__ :List[Any] = emb.weight.data
return lin_layer
def a ( __a , __a ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = torch.load(__a , map_location='''cpu''' )
UpperCamelCase__ :Tuple = mam_aaa['''args''']
UpperCamelCase__ :Optional[Any] = mam_aaa['''model''']
UpperCamelCase__ :Any = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(__a )
rename_keys(__a )
UpperCamelCase__ :List[Any] = state_dict['''decoder.embed_tokens.weight'''].shape[0]
UpperCamelCase__ :Any = args.share_decoder_input_output_embed
UpperCamelCase__ :int = [int(__a ) for i in args.conv_kernel_sizes.split(''',''' )]
UpperCamelCase__ :Union[str, Any] = SpeechaTextConfig(
vocab_size=__a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(__a ) , conv_channels=args.conv_channels , conv_kernel_sizes=__a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__a , num_beams=5 , max_length=200 , use_cache=__a , decoder_start_token_id=2 , early_stopping=__a , )
UpperCamelCase__ :Tuple = SpeechaTextForConditionalGeneration(__a )
UpperCamelCase__ , UpperCamelCase__ :Dict = model.model.load_state_dict(__a , strict=__a )
if len(__a ) > 0 and not set(__a ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
UpperCamelCase__ :Union[str, Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCamelCase__ :List[Any] = lm_head_weights
model.save_pretrained(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__snake_case = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path) | 97 |
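A minimal sketch of the weight tying that `make_linear_from_emb` above is used for: when embeddings are shared, the LM head becomes a bias-free linear layer whose weight tensor is the decoder's token-embedding matrix (the sizes below are hypothetical).

import torch
from torch import nn

vocab_size, emb_size = 1_000, 256
embed_tokens = nn.Embedding(vocab_size, emb_size)
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight = embed_tokens.weight  # share the same parameter
assert torch.equal(lm_head.weight, embed_tokens.weight)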
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__snake_case = pytest.mark.integration
@require_faiss
class lowercase ( A__ ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(UpperCamelCase_ ) for x in np.arange(30 ).tolist()]} )
return dset
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
UpperCamelCase__ :Any = dset.map(
lambda UpperCamelCase_ , UpperCamelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
UpperCamelCase__ , UpperCamelCase__ :str = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCamelCase_ ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase__ , UpperCamelCase__ :str = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(UpperCamelCase_ , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
UpperCamelCase__ :Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
UpperCamelCase__ :Optional[int] = {'''acknowledged''': True}
mocked_bulk.return_value = [(True, None)] * 30
UpperCamelCase__ :str = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
UpperCamelCase__ :Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class lowercase ( A__ ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :List[str] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
UpperCamelCase__ :Any = np.zeros(5 , dtype=np.floataa )
UpperCamelCase__ :Optional[int] = 1
UpperCamelCase__ , UpperCamelCase__ :str = index.search(UpperCamelCase_ )
self.assertRaises(UpperCamelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
UpperCamelCase__ :List[str] = np.eye(5 , dtype=np.floataa )[::-1]
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = index.search_batch(UpperCamelCase_ )
self.assertRaises(UpperCamelCase_ , index.search_batch , queries[0] )
UpperCamelCase__ :List[Any] = [scores[0] for scores in total_scores]
UpperCamelCase__ :Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCamelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :Tuple = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
UpperCamelCase__ :Dict = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCamelCase_ ):
UpperCamelCase__ :Optional[int] = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :List[Any] = faiss.IndexFlat(5 )
UpperCamelCase__ :List[Any] = FaissIndex(custom_index=UpperCamelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import faiss
UpperCamelCase__ :str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCamelCase_ ) as tmp_file:
index.save(tmp_file.name )
UpperCamelCase__ :Optional[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase__ :Dict = np.zeros(5 , dtype=np.floataa )
UpperCamelCase__ :str = 1
UpperCamelCase__ , UpperCamelCase__ :List[str] = index.search(UpperCamelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a ( __a ) -> Tuple:
'''simple docstring'''
import faiss
UpperCamelCase__ :Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
UpperCamelCase__ :Optional[int] = '''index.faiss'''
UpperCamelCase__ :Tuple = f'''mock://{index_name}'''
index.save(__a , storage_options=mockfs.storage_options )
UpperCamelCase__ :List[Any] = FaissIndex.load(__a , storage_options=mockfs.storage_options )
UpperCamelCase__ :Tuple = np.zeros(5 , dtype=np.floataa )
UpperCamelCase__ :int = 1
UpperCamelCase__ , UpperCamelCase__ :str = index.search(__a )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowercase ( A__ ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
UpperCamelCase__ :Any = Elasticsearch()
UpperCamelCase__ :Any = {'''acknowledged''': True}
UpperCamelCase__ :int = ElasticSearchIndex(es_client=UpperCamelCase_ )
mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
UpperCamelCase__ :Any = '''foo'''
UpperCamelCase__ :Dict = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
UpperCamelCase__ , UpperCamelCase__ :int = index.search(UpperCamelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
UpperCamelCase__ :Dict = '''foo'''
UpperCamelCase__ :Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
UpperCamelCase__ , UpperCamelCase__ :Any = index.search(UpperCamelCase_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
UpperCamelCase__ :Tuple = ['''foo''', '''bar''', '''foobar''']
UpperCamelCase__ :Optional[int] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
UpperCamelCase__ , UpperCamelCase__ :List[Any] = index.search_batch(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = [scores[0] for scores in total_scores]
UpperCamelCase__ :int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCamelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCamelCase_ )
# batched queries with timeout
UpperCamelCase__ :Any = ['''foo''', '''bar''', '''foobar''']
UpperCamelCase__ :int = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = index.search_batch(UpperCamelCase_ , request_timeout=30 )
UpperCamelCase__ :Any = [scores[0] for scores in total_scores]
UpperCamelCase__ :str = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCamelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCamelCase_ ) | 97 |
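A hedged standalone sketch of the inner-product search the FAISS tests above exercise (requires the faiss-cpu package): with an inner-product index over one-hot vectors, a query equal to one basis vector retrieves exactly that vector with score 1.0.

import faiss
import numpy as np

vectors = np.eye(5, dtype=np.float32)
index = faiss.IndexFlatIP(5)  # 5-dimensional index using the inner-product metric
index.add(vectors)
scores, indices = index.search(vectors[1:2], 1)
assert indices[0][0] == 1 and scores[0][0] == 1.0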
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
'''simple docstring'''
if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
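# Illustrative usage sketch (assumes network access to the
# '''microsoft/conditional-detr-resnet-50''' checkpoint used in the slow tests above).
from PIL import Image
from transformers import ConditionalDetrImageProcessor

def _example_preprocess(image_path='''./tests/fixtures/tests_samples/COCO/000000039769.png''' ):
    processor = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
    image = Image.open(image_path ).convert('''RGB''' )
    # Without annotations the processor only resizes, rescales, normalizes and pads.
    encoding = processor(images=image , return_tensors='''pt''' )
    return encoding['''pixel_values'''].shape  # e.g. torch.Size([1, 3, 800, 1066]) for this image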
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase ( A__ ):
"""simple docstring"""
_a = ['vqvae']
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , mel=UpperCamelCase_ , vqvae=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1000
@torch.no_grad()
def __call__( self , UpperCamelCase_ = 1 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
UpperCamelCase__ :str = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
UpperCamelCase__ :List[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCamelCase_ , device=self.device , )
UpperCamelCase__ :str = noise
UpperCamelCase__ :Dict = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.mel.audio_slice_to_image(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
UpperCamelCase__ :List[str] = (input_image / 255) * 2 - 1
UpperCamelCase__ :Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
UpperCamelCase__ :int = self.vqvae.encode(torch.unsqueeze(UpperCamelCase_ , 0 ) ).latent_dist.sample(
generator=UpperCamelCase_ )[0]
UpperCamelCase__ :Dict = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
UpperCamelCase__ :Any = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , self.scheduler.timesteps[start_step - 1] )
UpperCamelCase__ :List[str] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
UpperCamelCase__ :Any = int(mask_start_secs * pixels_per_second )
UpperCamelCase__ :str = int(mask_end_secs * pixels_per_second )
UpperCamelCase__ :str = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCamelCase_ ):
UpperCamelCase__ :List[str] = self.unet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )['''sample''']
else:
UpperCamelCase__ :List[str] = self.unet(UpperCamelCase_ , UpperCamelCase_ )['''sample''']
if isinstance(self.scheduler , UpperCamelCase_ ):
UpperCamelCase__ :List[str] = self.scheduler.step(
model_output=UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , eta=UpperCamelCase_ , generator=UpperCamelCase_ , )['''prev_sample''']
else:
UpperCamelCase__ :Union[str, Any] = self.scheduler.step(
model_output=UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
UpperCamelCase__ :Union[str, Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
UpperCamelCase__ :List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
UpperCamelCase__ :List[str] = 1 / self.vqvae.config.scaling_factor * images
UpperCamelCase__ :Optional[int] = self.vqvae.decode(UpperCamelCase_ )['''sample''']
UpperCamelCase__ :Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ :List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
UpperCamelCase__ :Optional[Any] = (images * 255).round().astype('''uint8''' )
UpperCamelCase__ :Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
UpperCamelCase__ :Optional[int] = [self.mel.image_to_audio(UpperCamelCase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCamelCase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCamelCase_ ) )
@torch.no_grad()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler , UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ )
UpperCamelCase__ :str = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
UpperCamelCase__ :List[str] = (sample / 255) * 2 - 1
UpperCamelCase__ :Dict = torch.Tensor(UpperCamelCase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
UpperCamelCase__ :List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
UpperCamelCase__ :List[Any] = self.scheduler.alphas_cumprod[t]
UpperCamelCase__ :Optional[int] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
UpperCamelCase__ :int = 1 - alpha_prod_t
UpperCamelCase__ :Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ )['''sample''']
UpperCamelCase__ :Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
UpperCamelCase__ :Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
UpperCamelCase__ :List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = acos(torch.dot(torch.flatten(UpperCamelCase_ ) , torch.flatten(UpperCamelCase_ ) ) / torch.norm(UpperCamelCase_ ) / torch.norm(UpperCamelCase_ ) )
return sin((1 - alpha) * theta ) * xa / sin(UpperCamelCase_ ) + sin(alpha * theta ) * xa / sin(UpperCamelCase_ ) | 97 |
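# Illustrative usage sketch for the pipeline above. `model_id` is a placeholder for a
# local path or Hub id of an audio-diffusion checkpoint (unet + scheduler + mel,
# optionally vqvae); generation is slow on CPU.
from diffusers import AudioDiffusionPipeline

def _example_generate(model_id ):
    pipe = AudioDiffusionPipeline.from_pretrained(model_id )
    # return_dict=False mirrors the tuple returned at the end of __call__ above.
    images, (sample_rate, audios) = pipe(batch_size=1 , return_dict=False )
    return images[0], sample_rate, audios[0]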
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask :
"""simple docstring"""
    def __init__( self , task_performed , total ):
        '''simple docstring'''
        self.total_tasks = total # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list ) # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until( self , mask , task_no ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task to anyone in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
        self.dp[mask][task_no] = total_ways_util
return self.dp[mask][task_no]
    def count_no_of_ways( self , task_performed ):
        '''simple docstring'''
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5 # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
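# Illustrative brute-force cross-check of the bitmask DP above (small inputs only):
# every person must get exactly one distinct task from their own list, so the
# count for the sample input [[1, 3, 4], [1, 2, 5], [3, 4]] should come out to 10.
from itertools import product

def _count_by_brute_force(task_performed ):
    return sum(
        1
        for assignment in product(*task_performed )
        if len(set(assignment ) ) == len(task_performed )
    )

print(_count_by_brute_force([[1, 3, 4], [1, 2, 5], [3, 4]]))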
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__snake_case = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
__snake_case = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
__snake_case = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
__snake_case = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
__snake_case = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
__snake_case = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
__snake_case = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = randrange(len(__a ) ), randrange(len(__a ) )
UpperCamelCase__ :Optional[Any] = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def a ( __a = 100 ) -> Optional[Any]:
'''simple docstring'''
return (generate_random_hand() for _ in range(__a ))
@pytest.mark.parametrize('''hand, expected''' , __a )
def a ( __a , __a ) -> str:
'''simple docstring'''
assert PokerHand(__a )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , __a )
def a ( __a , __a ) -> Any:
'''simple docstring'''
assert PokerHand(__a )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , __a )
def a ( __a , __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = PokerHand(__a )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , __a )
def a ( __a , __a ) -> Any:
'''simple docstring'''
assert PokerHand(__a )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , __a )
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert PokerHand(__a )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , __a )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
assert PokerHand(__a ).compare_with(PokerHand(__a ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
assert PokerHand(__a ).compare_with(PokerHand(__a ) ) == expected
def a ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :int = [PokerHand(__a ) for hand in SORTED_HANDS]
UpperCamelCase__ :Tuple = poker_hands.copy()
shuffle(__a )
UpperCamelCase__ :List[Any] = chain(sorted(__a ) )
for index, hand in enumerate(__a ):
assert hand == poker_hands[index]
def a ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Tuple = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=__a )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def a ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = PokerHand('''2C 4S AS 3D 5C''' )
UpperCamelCase__ :str = True
UpperCamelCase__ :Any = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :str = 0
    cwd = os.path.abspath(os.path.dirname(__file__ ) )
    file_path = os.path.join(cwd , '''poker_hands.txt''' )
    with open(file_path ) as file_hand:
for line in file_hand:
UpperCamelCase__ :List[str] = line[:14].strip()
UpperCamelCase__ :str = line[15:].strip()
UpperCamelCase__ , UpperCamelCase__ :List[Any] = PokerHand(__a ), PokerHand(__a )
UpperCamelCase__ :Optional[Any] = player.compare_with(__a )
if output == "Win":
answer += 1
assert answer == 376 | 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def get_all_tweets( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
'''simple docstring'''
from math import factorial
class Dual :
"""simple docstring"""
    def __init__( self , real , rank ):
        '''simple docstring'''
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
def __repr__( self ):
'''simple docstring'''
return (
F'''{self.real}+'''
F'''{"+".join(str(UpperCamelCase_ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , UpperCamelCase_ )
def __add__( self , UpperCamelCase_ ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return Dual(self.real + other , self.duals )
UpperCamelCase__ :str = self.duals.copy()
UpperCamelCase__ :List[Any] = other.duals.copy()
if len(UpperCamelCase_ ) > len(UpperCamelCase_ ):
o_dual.extend([1] * (len(UpperCamelCase_ ) - len(UpperCamelCase_ )) )
elif len(UpperCamelCase_ ) < len(UpperCamelCase_ ):
s_dual.extend([1] * (len(UpperCamelCase_ ) - len(UpperCamelCase_ )) )
UpperCamelCase__ :Tuple = []
for i in range(len(UpperCamelCase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , UpperCamelCase_ )
_a = __add__
def __sub__( self , UpperCamelCase_ ):
'''simple docstring'''
return self + other * -1
    def __mul__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
_a = __mul__
def __truediv__( self , UpperCamelCase_ ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :str = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , UpperCamelCase_ )
raise ValueError
def __floordiv__( self , UpperCamelCase_ ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Optional[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , UpperCamelCase_ )
raise ValueError
    def __pow__( self , n ):
        '''simple docstring'''
        if n < 0 or isinstance(n , float ):
            raise ValueError('''power must be a positive integer''' )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate( func , position , order ) -> Any:
    '''simple docstring'''
    if not callable(func ):
        raise ValueError('''differentiate() requires a function as input for func''' )
    if not isinstance(position , (float, int) ):
        raise ValueError('''differentiate() requires a float as input for position''' )
    if not isinstance(order , int ):
        raise ValueError('''differentiate() requires an int as input for order''' )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f( y ) -> Optional[int]:
        '''simple docstring'''
        return y**2 * y**4
print(differentiate(f, 9, 2)) | 97 |
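    # Illustrative sanity checks (relies on the de-obfuscated Dual / differentiate
    # names restored above): d/dx x**2 at x = 3 is 6, and the second derivative
    # of x**3 at x = 2 is 12.
    print(differentiate(lambda x: x**2, 3, 1))  # expected: 6
    print(differentiate(lambda x: x**3, 2, 2))  # expected: 12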
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key( state_dict , old , new ) -> List[str]:
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ) -> Any:
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def resize( image , checkpoint_url ) -> Optional[int]:
    '''simple docstring'''
    width , height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if '''detection''' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ) -> int:
    '''simple docstring'''
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
    return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
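# Example invocation (illustrative; the dump folder name is arbitrary and
# --push_to_hub requires write access to the Hub):
#
#   python <this_script>.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection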
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.txt'''}
__snake_case = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__snake_case = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__snake_case = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_INIT_CONFIGURATION
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ConvBertTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def lowerCAmelCase__ ( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def lowerCAmelCase__ ( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ ) | 97 |
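# Illustrative usage sketch (assumes network access to the '''YituTech/conv-bert-base'''
# checkpoint listed in the maps above).
from transformers import ConvBertTokenizerFast

def _example_tokenize(text='''Hello world!''' ):
    tokenizer = ConvBertTokenizerFast.from_pretrained('''YituTech/conv-bert-base''' )
    # [CLS]/[SEP] specials and token type ids follow the two methods defined above.
    encoding = tokenizer(text , return_token_type_ids=True )
    return encoding['''input_ids'''], encoding['''token_type_ids''']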
'''simple docstring'''
def solution( ) -> Union[str, Any]:
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 | 1 |
'''simple docstring'''
def a ( __a ) -> None:
'''simple docstring'''
    triangle = generate_pascal_triangle(__a )
for row_idx in range(__a ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=''' ''' )
else:
print(triangle[row_idx][col_idx] , end='''''' )
print()
def a ( __a ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(__a , __a ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
UpperCamelCase__ :list[list[int]] = []
for current_row_idx in range(__a ):
UpperCamelCase__ :List[Any] = populate_current_row(__a , __a )
triangle.append(__a )
return triangle
def a ( __a , __a ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ :int = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
UpperCamelCase__ , UpperCamelCase__ :Dict = 1, 1
for current_col_idx in range(1 , __a ):
calculate_current_element(
__a , __a , __a , __a )
return current_row
def a ( __a , __a , __a , __a , ) -> None:
'''simple docstring'''
UpperCamelCase__ :Any = triangle[current_row_idx - 1][current_col_idx - 1]
UpperCamelCase__ :List[Any] = triangle[current_row_idx - 1][current_col_idx]
UpperCamelCase__ :List[Any] = above_to_left_elt + above_to_right_elt
def a ( __a ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(__a , __a ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
UpperCamelCase__ :list[list[int]] = [[1]]
for row_index in range(1 , __a ):
UpperCamelCase__ :Any = [0] + result[-1] + [0]
UpperCamelCase__ :Optional[int] = row_index + 1
# Calculate the number of distinct elements in a row
UpperCamelCase__ :Any = sum(divmod(__a , 2 ) )
UpperCamelCase__ :Optional[int] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
UpperCamelCase__ :Optional[int] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
UpperCamelCase__ :Dict = row_first_half + row_second_half
result.append(__a )
return result
def a ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__a , __a ) -> None:
UpperCamelCase__ :List[str] = f'''{func.__name__}({value})'''
UpperCamelCase__ :List[str] = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f'''{call:38} -- {timing:.4f} seconds''' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__a , __a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 97 |
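# Hedged cross-check for the two Pascal's-triangle builders above: both are assumed to
# return a list of rows, so they can be compared against each other and against the
# binomial coefficients from math.comb. The helper name is illustrative.
from math import comb

def check_pascal(generate_plain, generate_optimized, num_rows: int = 8) -> bool:
    plain = generate_plain(num_rows)
    optimized = generate_optimized(num_rows)
    expected = [[comb(n, k) for k in range(n + 1)] for n in range(num_rows)]
    return plain == optimized == expected

# check_pascal(generate_pascal_triangle, generate_pascal_triangle_optimized)  # -> True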
'''simple docstring'''
from PIL import Image
def a ( __a , __a ) -> Image:
'''simple docstring'''
def brightness(__a ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__a )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 | 1 |
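# Self-contained sketch of the PIL technique used above: Image.point maps every pixel
# value through a function, so adding a constant offset shifts brightness. The explicit
# clamp keeps values in the valid 0-255 range; the synthetic image avoids needing lena.jpg.
from PIL import Image

def shift_brightness(img: Image.Image, level: float) -> Image.Image:
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(lambda c: max(0, min(255, int(c + level))))

# demo = Image.new("RGB", (32, 32), color=(100, 100, 100))
# brighter = shift_brightness(demo, 100)   # every channel becomes 200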
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : i.created_at , reverse=__a )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 1 |
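# The stale-issue script above mixes GitHub API calls with date arithmetic; this is an
# API-free sketch of just the decision rule so the thresholds (7 / 23 / 30 days) can be
# unit-tested. The helper name and return values are illustrative, not from the script.
from datetime import datetime, timedelta

def stale_action(updated_at: datetime, created_at: datetime, last_comment_by_bot: bool, exempt: bool, now: datetime) -> str:
    if exempt or (now - created_at).days < 30:
        return "keep"
    days_inactive = (now - updated_at).days
    if last_comment_by_bot and days_inactive > 7:
        return "close"
    if days_inactive > 23:
        return "mark_stale"
    return "keep"

# now = datetime.utcnow()
# stale_action(now - timedelta(days=10), now - timedelta(days=60), True, False, now)   # 'close'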
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__snake_case = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__snake_case = direct_transformers_import(PATH_TO_TRANSFORMERS)
__snake_case = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__snake_case = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def a ( __a , __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :str = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
UpperCamelCase__ :Dict = True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , __a , )
is not None
):
UpperCamelCase__ :str = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCamelCase__ :str = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCamelCase__ :Optional[Any] = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCamelCase__ :str = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCamelCase__ :Tuple = True
if not attribute_used:
UpperCamelCase__ :Optional[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCamelCase__ :Tuple = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCamelCase__ :str = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCamelCase__ :Dict = True
elif attribute.endswith('''_token_id''' ):
UpperCamelCase__ :Dict = True
# configuration class specific cases
if not case_allowed:
UpperCamelCase__ :Union[str, Any] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCamelCase__ :str = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Dict = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCamelCase__ :Union[str, Any] = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCamelCase__ :int = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCamelCase__ :Tuple = {}
if len(config_class.attribute_map ) > 0:
UpperCamelCase__ :int = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCamelCase__ :Tuple = inspect.getsourcefile(__a )
UpperCamelCase__ :Union[str, Any] = os.path.dirname(__a )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCamelCase__ :Optional[int] = [os.path.join(__a , __a ) for fn in os.listdir(__a ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCamelCase__ :Optional[int] = []
for path in modeling_paths:
if os.path.isfile(__a ):
with open(__a ) as fp:
modeling_sources.append(fp.read() )
UpperCamelCase__ :Optional[int] = []
for config_param, default_value in zip(__a , __a ):
# `attributes` here is all the variant names for `config_param`
UpperCamelCase__ :Dict = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__a , __a , __a , __a ):
unused_attributes.append(attributes[0] )
return sorted(__a )
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :str = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCamelCase__ :List[Any] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __a : inspect.isclass(__a )
and issubclass(__a , __a )
and inspect.getmodule(__a ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCamelCase__ :List[Any] = check_config_attributes_being_used(__a )
if len(__a ) > 0:
UpperCamelCase__ :Dict = unused_attributes
if len(__a ) > 0:
UpperCamelCase__ :Optional[int] = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(__a )
if __name__ == "__main__":
check_config_attributes() | 97 |
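# Hedged sketch of the core check performed above: an attribute counts as "used" if the
# modeling source contains `config.xxx` or `getattr(config, "xxx"`, including the
# multi-line form that the regex handles. Illustrative helper, not the repo's own code.
import re

def attribute_is_used(attribute: str, source: str) -> bool:
    if f"config.{attribute}" in source or f'getattr(config, "{attribute}"' in source:
        return True
    multiline = rf'getattr[ \t\n\r]*\([ \t\n\r]*(self\.)?config,[ \t\n\r]*"{attribute}"'
    return re.search(multiline, source) is not None

# attribute_is_used("hidden_size", 'x = getattr(\n    config, "hidden_size", 768)')   # True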
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
    __a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char and keep the result so the substitution is actually applied
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__snake_case = '''
Human: <<task>>
Assistant: '''
__snake_case = '''huggingface-tools/default-prompts'''
__snake_case = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def a ( __a , __a , __a="run" ) -> Union[str, Any]:
'''simple docstring'''
if prompt_or_repo_id is None:
UpperCamelCase__ :int = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , __a ) is not None:
return prompt_or_repo_id
UpperCamelCase__ :Optional[int] = cached_file(
__a , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(__a , '''r''' , encoding='''utf-8''' ) as f:
return f.read() | 97 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCamelCase__ :int = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ :str = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ :Union[str, Any] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1e-3 ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCamelCase__ :Optional[int] = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ :Union[str, Any] = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ :Dict = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ :Union[str, Any] = model(UpperCamelCase_ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1e-3 ) ) | 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a ( __a , __a = True , __a = math.inf , __a = -math.inf , __a = math.inf , __a = -math.inf , __a = False , __a = 100 , __a = 0.0_1 , __a = 1 , ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = search_prob
UpperCamelCase__ :List[str] = start_temperate
UpperCamelCase__ :List[str] = []
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :Dict = None
while not search_end:
UpperCamelCase__ :List[str] = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCamelCase__ :int = current_state
scores.append(__a )
iterations += 1
UpperCamelCase__ :List[Any] = None
UpperCamelCase__ :Dict = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCamelCase__ :Any = random.randint(0 , len(__a ) - 1 ) # picking a random neighbor
UpperCamelCase__ :int = neighbors.pop(__a )
UpperCamelCase__ :str = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCamelCase__ :str = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCamelCase__ :str = picked_neighbor
else:
UpperCamelCase__ :Optional[int] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCamelCase__ :Optional[int] = picked_neighbor
UpperCamelCase__ :List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCamelCase__ :Any = True
else:
UpperCamelCase__ :Tuple = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__a ) , __a )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def a ( __a , __a ) -> Tuple:
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__snake_case = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__snake_case = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
__snake_case = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__snake_case = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
)
def a ( __a , __a ) -> Any:
'''simple docstring'''
return (3 * x**2) - (6 * y)
__snake_case = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__snake_case = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F"""{local_min.score()}"""
)
__snake_case = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__snake_case = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F"""{local_min.score()}"""
) | 97 |
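# The heart of the annealing loop above is the Metropolis acceptance rule: always take
# an improving neighbour, and take a worsening one with probability exp(delta / T).
# Standalone sketch with illustrative names; `delta` is the score change when maximising.
import math
import random

def accept_move(delta: float, temperature: float) -> bool:
    if delta > 0:
        return True
    if temperature <= 0:
        return False
    return random.random() < math.exp(delta / temperature)

# At a high temperature almost every move is accepted; as the temperature decays toward
# the threshold, only improving moves survive, which is what drives convergence above.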
'''simple docstring'''
def a ( __a , __a ) -> int:
'''simple docstring'''
if len(__a ) != len(__a ):
raise ValueError('''String lengths must match!''' )
UpperCamelCase__ :Union[str, Any] = 0
for chara, chara in zip(__a , __a ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
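# A compact sketch of the Hamming-distance idea above: compare two equal-length strings
# position by position and count the mismatches. Illustrative names only.
def hamming(a: str, b: str) -> int:
    if len(a) != len(b):
        raise ValueError("String lengths must match!")
    return sum(ca != cb for ca, cb in zip(a, b))

# hamming("karolin", "kathrin")   # -> 3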
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__snake_case = False
__snake_case = logging.get_logger(__name__)
__snake_case = '''ybelkada/fonts'''
def a ( ) -> Optional[Any]:
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def a ( __a , __a , __a ) -> Optional[int]:
'''simple docstring'''
requires_backends(__a , ['''torch'''] )
_check_torch_version()
UpperCamelCase__ :Tuple = image_tensor.unsqueeze(0 )
UpperCamelCase__ :Optional[int] = torch.nn.functional.unfold(__a , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCamelCase__ :Union[str, Any] = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , __a , __a , -1 )
UpperCamelCase__ :List[Any] = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def a ( __a , __a = 36 , __a = "black" , __a = "white" , __a = 5 , __a = 5 , __a = 5 , __a = 5 , __a = None , __a = None , ) -> Image.Image:
'''simple docstring'''
requires_backends(__a , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
UpperCamelCase__ :Union[str, Any] = textwrap.TextWrapper(width=80 )
UpperCamelCase__ :int = wrapper.wrap(text=__a )
UpperCamelCase__ :Union[str, Any] = '''\n'''.join(__a )
if font_bytes is not None and font_path is None:
UpperCamelCase__ :str = io.BytesIO(__a )
elif font_path is not None:
UpperCamelCase__ :int = font_path
else:
UpperCamelCase__ :int = hf_hub_download(__a , '''Arial.TTF''' )
UpperCamelCase__ :List[str] = ImageFont.truetype(__a , encoding='''UTF-8''' , size=__a )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCamelCase__ :Optional[int] = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , __a ) )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = temp_draw.textbbox((0, 0) , __a , __a )
# Create the actual image with a bit of padding around the text.
UpperCamelCase__ :List[Any] = text_width + left_padding + right_padding
UpperCamelCase__ :int = text_height + top_padding + bottom_padding
UpperCamelCase__ :str = Image.new('''RGB''' , (image_width, image_height) , __a )
UpperCamelCase__ :Tuple = ImageDraw.Draw(__a )
draw.text(xy=(left_padding, top_padding) , text=__a , fill=__a , font=__a )
return image
def a ( __a , __a , **__a ) -> Optional[Any]:
'''simple docstring'''
requires_backends(__a , '''vision''' )
# Convert to PIL image if necessary
UpperCamelCase__ :Optional[int] = to_pil_image(__a )
UpperCamelCase__ :List[str] = render_text(__a , **__a )
UpperCamelCase__ :List[str] = max(header_image.width , image.width )
UpperCamelCase__ :List[str] = int(image.height * (new_width / image.width) )
UpperCamelCase__ :Dict = int(header_image.height * (new_width / header_image.width) )
UpperCamelCase__ :Dict = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCamelCase__ :Optional[int] = to_numpy_array(__a )
if infer_channel_dimension_format(__a ) == ChannelDimension.LAST:
UpperCamelCase__ :int = to_channel_dimension_format(__a , ChannelDimension.LAST )
return new_image
class lowercase ( A__ ):
"""simple docstring"""
_a = ['flattened_patches']
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = 2048 , UpperCamelCase_ = False , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :List[Any] = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
UpperCamelCase__ :Optional[int] = do_normalize
UpperCamelCase__ :List[str] = do_convert_rgb
UpperCamelCase__ :Union[str, Any] = max_patches
UpperCamelCase__ :Optional[int] = is_vqa
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
requires_backends(self.extract_flattened_patches , '''torch''' )
_check_torch_version()
# convert to torch
UpperCamelCase__ :Union[str, Any] = to_channel_dimension_format(UpperCamelCase_ , ChannelDimension.FIRST )
UpperCamelCase__ :Dict = torch.from_numpy(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = patch_size['''height'''], patch_size['''width''']
UpperCamelCase__ , UpperCamelCase__ :Any = get_image_size(UpperCamelCase_ )
# maximize scale s.t.
UpperCamelCase__ :Tuple = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase__ :Tuple = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase_ ) , 1 )
UpperCamelCase__ :Optional[Any] = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase_ ) , 1 )
UpperCamelCase__ :str = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase__ :Optional[int] = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase__ :Optional[int] = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=UpperCamelCase_ , antialias=UpperCamelCase_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase__ :Optional[int] = torch_extract_patches(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = patches.shape
UpperCamelCase__ :Dict = patches_shape[1]
UpperCamelCase__ :int = patches_shape[2]
UpperCamelCase__ :List[str] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase__ :int = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase__ :Union[str, Any] = torch.arange(UpperCamelCase_ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase_ ).reshape([rows * columns, 1] )
UpperCamelCase__ :str = torch.arange(UpperCamelCase_ ).reshape([1, columns] ).repeat(UpperCamelCase_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
UpperCamelCase__ :List[Any] = row_ids.to(torch.floataa )
UpperCamelCase__ :int = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase__ :Any = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase__ :Any = torch.nn.functional.pad(UpperCamelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase__ :Union[str, Any] = to_numpy_array(UpperCamelCase_ )
return result
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ):
'''simple docstring'''
if image.dtype == np.uinta:
UpperCamelCase__ :Dict = image.astype(np.floataa )
# take mean across the whole `image`
UpperCamelCase__ :Optional[int] = np.mean(UpperCamelCase_ )
UpperCamelCase__ :str = np.std(UpperCamelCase_ )
UpperCamelCase__ :int = max(UpperCamelCase_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ :Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase__ :List[Any] = patch_size if patch_size is not None else self.patch_size
UpperCamelCase__ :Any = max_patches if max_patches is not None else self.max_patches
UpperCamelCase__ :Optional[int] = self.is_vqa
if kwargs.get('''data_format''' , UpperCamelCase_ ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are always returned as flattened patches.''' )
UpperCamelCase__ :Dict = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase__ :List[str] = [convert_to_rgb(UpperCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase__ :Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
UpperCamelCase__ :Tuple = kwargs.pop('''font_bytes''' , UpperCamelCase_ )
UpperCamelCase__ :Any = kwargs.pop('''font_path''' , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Any = [header_text] * len(UpperCamelCase_ )
UpperCamelCase__ :str = [
render_header(UpperCamelCase_ , header_text[i] , font_bytes=UpperCamelCase_ , font_path=UpperCamelCase_ )
for i, image in enumerate(UpperCamelCase_ )
]
if do_normalize:
UpperCamelCase__ :Optional[int] = [self.normalize(image=UpperCamelCase_ ) for image in images]
# convert to torch tensor and permute
UpperCamelCase__ :Optional[Any] = [
self.extract_flattened_patches(image=UpperCamelCase_ , max_patches=UpperCamelCase_ , patch_size=UpperCamelCase_ )
for image in images
]
# create attention mask in numpy
UpperCamelCase__ :Optional[int] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
UpperCamelCase__ :List[str] = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=UpperCamelCase_ )
return encoded_outputs | 97 |
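# Hedged sketch of the patch-extraction trick behind `torch_extract_patches` above:
# torch.nn.functional.unfold cuts a (C, H, W) image into non-overlapping
# patch_h x patch_w tiles, each flattened to a vector of length C * patch_h * patch_w.
# Shapes and names are illustrative; requires torch.
import torch
import torch.nn.functional as F

def extract_patches(image: torch.Tensor, patch_h: int, patch_w: int) -> torch.Tensor:
    # image: (C, H, W) with H and W divisible by the patch size
    patches = F.unfold(image.unsqueeze(0), (patch_h, patch_w), stride=(patch_h, patch_w))
    # (1, C * patch_h * patch_w, num_patches) -> (num_patches, C * patch_h * patch_w)
    return patches.squeeze(0).transpose(0, 1)

# extract_patches(torch.randn(3, 32, 32), 16, 16).shape   # torch.Size([4, 768])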
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
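# The dynamic-programming recurrence above can be cross-checked against the closed form
# C(n) = binom(2n, n) / (n + 1). `catalan_numbers` is assumed to be the function defined
# in the snippet above; the closed-form helper is illustrative.
from math import comb

def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)

# catalan_numbers(10) == [catalan_closed_form(i) for i in range(11)]   # -> True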
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__snake_case = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
return [json.loads(__a ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
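# Hedged sketch of the round trip these tests exercise, written against the public
# datasets API (Dataset.to_json / load_dataset) instead of the internal reader and
# writer classes; JSON Lines output is assumed to be the default of to_json.
from datasets import Dataset, load_dataset

def roundtrip_jsonl(tmp_dir: str) -> bool:
    original = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    path = f"{tmp_dir}/data.jsonl"
    original.to_json(path)
    reloaded = load_dataset("json", data_files=path, split="train")
    return reloaded.column_names == original.column_names and reloaded.num_rows == original.num_rows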
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__snake_case = parser.parse_args()
__snake_case = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
__snake_case = CLIPImageProcessor()
__snake_case = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
__snake_case = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path) | 97 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 1 |
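# Hedged sketch of the behaviour `_convert_nargs_to_dict` is being tested for above:
# turning a flat "--key value --flag" argument list into a dict with bool/int/float
# coercion. This is an illustrative reimplementation, not Accelerate's actual function.
def nargs_to_dict(args: list) -> dict:
    def coerce(value: str):
        if value.lower() in ("true", "false"):
            return value.lower() == "true"
        for cast in (int, float):
            try:
                return cast(value)
            except ValueError:
                pass
        return value

    result = {}
    i = 0
    while i < len(args):
        key = args[i].lstrip("-")
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            result[key] = coerce(args[i + 1])
            i += 2
        else:
            result[key] = True  # bare flag such as --do_train
            i += 1
    return result

# nargs_to_dict(["--model_name_or_path", "bert", "--do_train", "--epochs", "3"])
# -> {"model_name_or_path": "bert", "do_train": True, "epochs": 3}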
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''spiece.model'''}
__snake_case = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__snake_case = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__snake_case = 0
__snake_case = 1
__snake_case = 2
__snake_case = 3
__snake_case = 4
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = 'left'
def __init__( self , UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<sep>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<cls>" , UpperCamelCase_="<mask>" , UpperCamelCase_=["<eop>", "<eod>"] , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
UpperCamelCase__ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
UpperCamelCase__ :List[Any] = 3
UpperCamelCase__ :Any = do_lower_case
UpperCamelCase__ :int = remove_space
UpperCamelCase__ :List[Any] = keep_accents
UpperCamelCase__ :Optional[int] = vocab_file
UpperCamelCase__ :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.__dict__.copy()
UpperCamelCase__ :List[Any] = None
return state
def __setstate__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ :Optional[int] = {}
UpperCamelCase__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.remove_space:
UpperCamelCase__ :Tuple = ''' '''.join(inputs.strip().split() )
else:
UpperCamelCase__ :Tuple = inputs
UpperCamelCase__ :str = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
UpperCamelCase__ :Optional[Any] = unicodedata.normalize('''NFKD''' , UpperCamelCase_ )
UpperCamelCase__ :Any = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase_ )] )
if self.do_lower_case:
UpperCamelCase__ :List[Any] = outputs.lower()
return outputs
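    # Tokenize with SentencePiece, re-splitting pieces that end in a comma preceded by a digit.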
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.preprocess_text(UpperCamelCase_ )
UpperCamelCase__ :str = self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = []
for piece in pieces:
if len(UpperCamelCase_ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
UpperCamelCase__ :str = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase_ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCamelCase__ :Union[str, Any] = cur_pieces[1:]
else:
UpperCamelCase__ :Optional[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase_ )
else:
new_pieces.append(UpperCamelCase_ )
return new_pieces
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.sp_model.PieceToId(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = True , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = kwargs.pop('''use_source_tokenizer''' , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = self.convert_ids_to_tokens(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase__ :Union[str, Any] = []
UpperCamelCase__ :List[str] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
UpperCamelCase__ :List[str] = []
sub_texts.append(UpperCamelCase_ )
else:
current_sub_text.append(UpperCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase__ :str = ''''''.join(UpperCamelCase_ )
UpperCamelCase__ :int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase__ :List[Any] = self.clean_up_tokenization(UpperCamelCase_ )
return clean_text
else:
return text
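    # Special-token layout: ``X <sep> <cls>`` for a single sequence, ``A <sep> B <sep> <cls>`` for a pair.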
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :str = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1, 1]
return ([0] * len(UpperCamelCase_ )) + [1, 1]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ :Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
UpperCamelCase__ :Dict = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,) | 97 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> None:
    '''simple docstring'''
    # Freeze every parameter of the given module so it is excluded from gradient updates.
    for param in __a.parameters():
        param.requires_grad = False
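# Pick the best available torch device: CUDA if present, otherwise MPS, otherwise CPU.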
def a ( ) -> Union[str, Any]:
'''simple docstring'''
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( __a ) -> Any:
    '''simple docstring'''
    # Display an image with matplotlib, hiding both axes.
    fig = plt.imshow(__a )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def a ( ) -> str:
    '''simple docstring'''
    # Return the current time formatted as HH:MM:SS.
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp | 97 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
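# Stale-bot sweep: close issues that went quiet after a stale notice, reopen ones with fresh
# human activity, and post a stale warning on issues inactive for more than 23 days
# (unless they carry one of the exempted labels above).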
def a ( ) -> List[Any]:
'''simple docstring'''
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main() | 97 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def lowerCAmelCase__ ( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )} | 97 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
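# Processor round-trip tests: a toy CLIP vocabulary plus a ViT image processor are saved to a
# temporary directory and reloaded through CLIPSegProcessor.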
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ :int = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
UpperCamelCase__ :int = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
UpperCamelCase__ :Union[str, Any] = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
UpperCamelCase__ :Union[str, Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
UpperCamelCase__ :Any = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase__ :Optional[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_tokenizer()
UpperCamelCase__ :Dict = self.get_rust_tokenizer()
UpperCamelCase__ :Dict = self.get_image_processor()
UpperCamelCase__ :Optional[Any] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase__ :str = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Any = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ :Optional[Any] = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
UpperCamelCase__ :List[str] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :str = self.get_tokenizer()
UpperCamelCase__ :List[Any] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :str = self.prepare_image_inputs()
UpperCamelCase__ :Optional[int] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
UpperCamelCase__ :Dict = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :Optional[Any] = self.get_tokenizer()
UpperCamelCase__ :List[str] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[str] = '''lower newer'''
UpperCamelCase__ :str = processor(text=UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.get_image_processor()
UpperCamelCase__ :Tuple = self.get_tokenizer()
UpperCamelCase__ :List[str] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Tuple = '''lower newer'''
UpperCamelCase__ :List[Any] = self.prepare_image_inputs()
UpperCamelCase__ :List[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.get_image_processor()
UpperCamelCase__ :Any = self.get_tokenizer()
UpperCamelCase__ :Dict = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.prepare_image_inputs()
UpperCamelCase__ :Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ :List[Any] = processor(images=UpperCamelCase_ , visual_prompt=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.get_image_processor()
UpperCamelCase__ :Optional[Any] = self.get_tokenizer()
UpperCamelCase__ :Optional[int] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ :Union[str, Any] = processor.batch_decode(UpperCamelCase_ )
UpperCamelCase__ :Dict = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) | 97 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
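# "Fast" BlenderbotSmall tokenizer: a thin wrapper around a byte-level BPE backend.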
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 97 | 1 |
'''simple docstring'''
from PIL import Image
def change_brightness ( __a , level ) -> Image:
    '''simple docstring'''
    # Return a copy of the image with its brightness shifted by ``level``.
    def brightness(c ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return __a.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 | 1 |
'''simple docstring'''
def interpolation_search ( sorted_collection , item ) -> List[Any]:
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion ( sorted_collection , item , left , right ) -> int:
    '''simple docstring'''
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        # window arguments reconstructed to mirror the iterative version above: search [point, left]
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        # mirror of the iterative version: search [right, point]
        return interpolation_search_by_recursion(sorted_collection , item , right , point )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted ( __a ) -> List[str]:
    '''simple docstring'''
    if __a != sorted(__a ):
        raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print('''Not found''') | 97 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
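# Reader that builds a map-style Dataset (or a streaming IterableDataset) from a Python generator.
# Hypothetical usage sketch (the reader's public name is assumed, not shown in this snippet):
#     ds = GeneratorDatasetReader(generator=my_gen, gen_kwargs={"n": 10}).read()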
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='''train''' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset | 97 | 1 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd ( __a ) -> None:
    '''simple docstring'''
    # Print the upper half of the diamond: leading spaces followed by a growing row of stars.
    for i in range(0 , __a ):
        for _ in range(0 , __a - i - 1 ): # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ): # printing stars
            print('''* ''' , end='''''' )
        print()
def reverse_floyd ( __a ) -> None:
    '''simple docstring'''
    # Print the lower half of the diamond: shrinking rows of stars followed by spaces.
    for i in range(__a , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(__a - i + 1 , 0 , -1 ): # printing spaces
            print(''' ''' , end='''''' )
def pretty_print ( __a ) -> None:
    '''simple docstring'''
    # Print the full diamond, or a friendly message for non-positive sizes.
    if __a <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(__a ) # upper half
    reverse_floyd(__a ) # lower half
if __name__ == "__main__":
print(R'''| /\ | |- | |- |--| |\ /| |-''')
print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''') | 97 |
'''simple docstring'''
MOD_ADLER = 65521
def a ( __a ) -> int:
    '''simple docstring'''
    # Compute the Adler-32 checksum of the given string,
    # e.g. the classic example value for "Wikipedia" is 0x11E60398.
    a = 1
    b = 0
    for plain_chr in __a:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a | 97 | 1 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
__snake_case = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
__snake_case = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
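# The helpers below map model outputs from [-1, 1] float tensors to uint8 PIL images.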
def a ( __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ :Dict = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ :Optional[int] = numpy_to_pil(__a )
return images
def a ( __a ) -> Dict:
'''simple docstring'''
if images.ndim == 3:
UpperCamelCase__ :List[str] = images[None, ...]
UpperCamelCase__ :Dict = (images * 255).round().astype('''uint8''' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ :Union[str, Any] = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
else:
UpperCamelCase__ :Optional[Any] = [Image.fromarray(__a ) for image in images]
return pil_images | 97 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
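# CamemBERT model configuration; the second class below describes the ONNX export inputs.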
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
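# Tokenizer tests for a BioGPT-style BPE vocabulary built from a tiny toy vocab/merges pair.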
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = BioGptTokenizer
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
UpperCamelCase__ :List[str] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :Dict = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
UpperCamelCase__ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = '''lower newer'''
UpperCamelCase__ :Dict = '''lower newer'''
return input_text, output_text
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCamelCase__ :Optional[Any] = '''lower'''
UpperCamelCase__ :Optional[int] = ['''low''', '''er</w>''']
UpperCamelCase__ :Dict = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokens + ['''<unk>''']
UpperCamelCase__ :Dict = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCamelCase__ :str = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
UpperCamelCase__ :str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a ) | 97 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
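# The tester below generates toy inputs and computes the expected resized shapes so the
# image-processor tests can compare against them.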
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
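# Cosine similarity between L2-normalised embedding matrices, used by the safety checker below.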
def jax_cosine_distance ( emb_a , emb_b , eps=1e-12 ) -> jnp.ndarray:
    '''simple docstring'''
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
class lowercase ( nn.Module ):
"""simple docstring"""
_a = 42
_a = jnp.floataa
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = FlaxCLIPVisionModule(self.config.vision_config )
UpperCamelCase__ :Tuple = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase_ , dtype=self.dtype )
UpperCamelCase__ :Optional[Any] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
UpperCamelCase__ :Optional[Any] = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
UpperCamelCase__ :Tuple = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
UpperCamelCase__ :List[str] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.vision_model(UpperCamelCase_ )[1]
UpperCamelCase__ :Optional[int] = self.visual_projection(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = jax_cosine_distance(UpperCamelCase_ , self.special_care_embeds )
UpperCamelCase__ :List[str] = jax_cosine_distance(UpperCamelCase_ , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
UpperCamelCase__ :int = 0.0
UpperCamelCase__ :int = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
UpperCamelCase__ :Optional[int] = jnp.round(UpperCamelCase_ , 3 )
UpperCamelCase__ :Union[str, Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase_ )
# Use a lower threshold if an image has any special care concept
UpperCamelCase__ :Union[str, Any] = is_special_care * 0.01
UpperCamelCase__ :List[str] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
UpperCamelCase__ :Optional[int] = jnp.round(UpperCamelCase_ , 3 )
UpperCamelCase__ :int = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class lowercase ( A__ ):
"""simple docstring"""
_a = CLIPConfig
_a = 'clip_input'
_a = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = jnp.floataa , UpperCamelCase_ = True , **UpperCamelCase_ , ):
'''simple docstring'''
if input_shape is None:
UpperCamelCase__ :Union[str, Any] = (1, 224, 224, 3)
UpperCamelCase__ :Dict = self.module_class(config=UpperCamelCase_ , dtype=UpperCamelCase_ , **UpperCamelCase_ )
super().__init__(UpperCamelCase_ , UpperCamelCase_ , input_shape=UpperCamelCase_ , seed=UpperCamelCase_ , dtype=UpperCamelCase_ , _do_init=_do_init )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Dict = jax.random.normal(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :List[Any] = jax.random.split(UpperCamelCase_ )
UpperCamelCase__ :Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
UpperCamelCase__ :List[str] = self.module.init(UpperCamelCase_ , UpperCamelCase_ )['''params''']
return random_params
def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , ):
'''simple docstring'''
UpperCamelCase__ :Any = jnp.transpose(UpperCamelCase_ , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(UpperCamelCase_ , dtype=jnp.floataa ) , rngs={} , ) | 97 |
'''simple docstring'''
from collections import defaultdict
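# Bitmask DP: dp[mask][task_no] counts the ways to assign tasks starting at ``task_no`` when the
# set of already-busy people is encoded in ``mask``.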
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
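The row above is an obfuscated rendering of the classic bitmask-DP counter for handing out distinct tasks to persons. As a point of comparison, a minimal readable sketch of the same idea (class and method names here are illustrative, not taken from the row) could look like this:
from collections import defaultdict


class AssignmentCounter:
    """Count the ways to give every person exactly one task, with no task shared."""

    def __init__(self, task_performed, total_tasks):
        self.total_tasks = total_tasks
        self.final_mask = (1 << len(task_performed)) - 1  # all persons assigned
        # dp[mask][task_no] caches the answer for a given set of already-assigned persons
        self.dp = [[-1] * (total_tasks + 1) for _ in range(1 << len(task_performed))]
        self.task = defaultdict(list)  # task number -> persons able to do it
        for person, tasks in enumerate(task_performed):
            for task_no in tasks:
                self.task[task_no].append(person)

    def count(self, mask=0, task_no=1):
        if mask == self.final_mask:  # everyone already has a task
            return 1
        if task_no > self.total_tasks:  # ran out of tasks before covering everyone
            return 0
        if self.dp[mask][task_no] != -1:  # memoised sub-problem
            return self.dp[mask][task_no]
        ways = self.count(mask, task_no + 1)  # ways when this task stays unassigned
        for person in self.task[task_no]:
            if not mask & (1 << person):  # person is still free, give them the task
                ways += self.count(mask | (1 << person), task_no + 1)
        self.dp[mask][task_no] = ways
        return ways


print(AssignmentCounter([[1, 3, 4], [1, 2, 5], [3, 4]], 5).count())  # prints 10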
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def a ( __a="" ) -> str:
'''simple docstring'''
UpperCamelCase__ :Dict = tempfile.mkdtemp()
return os.path.join(__a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCamelCase__ :str = AgentAudio(UpperCamelCase_ )
UpperCamelCase__ :Tuple = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = sf.read(UpperCamelCase_ )
self.assertTrue(torch.allclose(UpperCamelCase_ , torch.tensor(UpperCamelCase_ ) , atol=1e-4 ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCamelCase__ :Optional[Any] = get_new_path(suffix='''.wav''' )
sf.write(UpperCamelCase_ , UpperCamelCase_ , 16000 )
UpperCamelCase__ :List[Any] = AgentAudio(UpperCamelCase_ )
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , UpperCamelCase_ )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = torch.randint(0 , 256 , (64, 64, 3) )
UpperCamelCase__ :List[str] = AgentImage(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCamelCase__ :str = Image.open(UpperCamelCase_ )
UpperCamelCase__ :int = AgentImage(UpperCamelCase_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCamelCase__ :List[Any] = Image.open(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = AgentImage(UpperCamelCase_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = '''Hey!'''
UpperCamelCase__ :str = AgentText(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , agent_type.to_string() )
self.assertEqual(UpperCamelCase_ , agent_type.to_raw() )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) | 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
UpperCamelCase__ :Union[str, Any] = ''''''.join(__a )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
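The conversion script in this row splits each fused in_proj_weight/in_proj_bias tensor from PyTorch's MultiheadAttention into separate query, key and value projections by slicing at 256 and 512. A small standalone sketch of just that slicing step (the random tensors below are placeholders for the real checkpoint weights):
import torch

hidden_size = 256  # matches the 256/512 slice points used by the script above

# nn.MultiheadAttention stores the q, k and v projections stacked along dim 0
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

q_weight = in_proj_weight[:hidden_size, :]
k_weight = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_weight = in_proj_weight[-hidden_size:, :]

q_bias = in_proj_bias[:hidden_size]
k_bias = in_proj_bias[hidden_size : 2 * hidden_size]
v_bias = in_proj_bias[-hidden_size:]

assert q_weight.shape == k_weight.shape == v_weight.shape == (hidden_size, hidden_size)
assert q_bias.shape == k_bias.shape == v_bias.shape == (hidden_size,)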
'''simple docstring'''
from __future__ import annotations
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = data
UpperCamelCase__ :Node | None = None
UpperCamelCase__ :Node | None = None
def a ( __a ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def a ( __a ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def a ( __a ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def a ( ) -> None: # Main function for testing.
'''simple docstring'''
UpperCamelCase__ :Dict = Node(1 )
UpperCamelCase__ :Tuple = Node(2 )
UpperCamelCase__ :Tuple = Node(3 )
UpperCamelCase__ :Any = Node(4 )
UpperCamelCase__ :List[Any] = Node(5 )
UpperCamelCase__ :List[str] = Node(6 )
UpperCamelCase__ :Union[str, Any] = Node(7 )
UpperCamelCase__ :List[Any] = Node(8 )
UpperCamelCase__ :int = Node(9 )
print(is_full_binary_tree(__a ) )
print(depth_of_tree(__a ) )
print('''Tree is: ''' )
display(__a )
if __name__ == "__main__":
main() | 97 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
                    # n=-2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def a ( __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :List[Any] = filter(lambda __a : p.requires_grad , model.parameters() )
UpperCamelCase__ :Any = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__snake_case = logging.getLogger(__name__)
def a ( __a , __a ) -> Tuple:
'''simple docstring'''
if metric == "rouge2":
UpperCamelCase__ :Tuple = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
UpperCamelCase__ :Dict = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
UpperCamelCase__ :Optional[int] = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this'''
            ''' function.''' )
UpperCamelCase__ :Union[str, Any] = ModelCheckpoint(
dirpath=__a , filename=__a , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def a ( __a , __a ) -> Any:
'''simple docstring'''
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=__a , verbose=__a , )
class lowercase ( pl.Callback ):
"""simple docstring"""
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Dict = {F'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCamelCase_ )
@rank_zero_only
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=True ):
'''simple docstring'''
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCamelCase__ :int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
UpperCamelCase__ :Any = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCamelCase__ :str = od / '''test_results.txt'''
UpperCamelCase__ :Dict = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCamelCase__ :str = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCamelCase__ :List[str] = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=UpperCamelCase_ )
generations_file.parent.mkdir(exist_ok=UpperCamelCase_ )
with open(UpperCamelCase_ , '''a+''' ) as writer:
for key in sorted(UpperCamelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCamelCase__ :Union[str, Any] = metrics[key]
if isinstance(UpperCamelCase_ , torch.Tensor ):
UpperCamelCase__ :Tuple = val.item()
UpperCamelCase__ :int = F'''{key}: {val:.6f}\n'''
writer.write(UpperCamelCase_ )
if not save_generations:
return
if "preds" in metrics:
UpperCamelCase__ :List[Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(UpperCamelCase_ )
@rank_zero_only
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
try:
UpperCamelCase__ :Dict = pl_module.model.model.num_parameters()
except AttributeError:
UpperCamelCase__ :str = pl_module.model.num_parameters()
UpperCamelCase__ :Optional[Any] = count_trainable_parameters(UpperCamelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCamelCase_ , UpperCamelCase_ , '''test''' )
@rank_zero_only
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 97 |
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
UpperCamelCase__ :Union[str, Any] = ''''''.join(__a )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 | 1 |
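Both copies of this snippet compute Project Euler problem 40: concatenate 1, 2, 3, ... into the Champernowne digit string and multiply the digits at positions 1, 10, 100, ..., 10^6. A more direct sketch of the same computation (it counts digits rather than appended numbers, so it builds a much shorter string):
def champernowne_product(limit: int = 10**6) -> int:
    digits: list[str] = []
    i = 1
    while len(digits) < limit:  # collect at least `limit` digits
        digits.extend(str(i))
        i += 1
    constant = "".join(digits)
    product = 1
    position = 1
    while position <= limit:  # d_1 * d_10 * d_100 * ... * d_1000000
        product *= int(constant[position - 1])
        position *= 10
    return product


print(champernowne_product())  # prints 210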
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__snake_case = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'albert'
def __init__( self , UpperCamelCase_=30000 , UpperCamelCase_=128 , UpperCamelCase_=4096 , UpperCamelCase_=12 , UpperCamelCase_=1 , UpperCamelCase_=64 , UpperCamelCase_=16384 , UpperCamelCase_=1 , UpperCamelCase_="gelu_new" , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=0.1 , UpperCamelCase_="absolute" , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=3 , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Tuple = vocab_size
UpperCamelCase__ :int = embedding_size
UpperCamelCase__ :int = hidden_size
UpperCamelCase__ :Dict = num_hidden_layers
UpperCamelCase__ :Tuple = num_hidden_groups
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Any = inner_group_num
UpperCamelCase__ :str = hidden_act
UpperCamelCase__ :Union[str, Any] = intermediate_size
UpperCamelCase__ :List[Any] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :int = max_position_embeddings
UpperCamelCase__ :str = type_vocab_size
UpperCamelCase__ :str = initializer_range
UpperCamelCase__ :Tuple = layer_norm_eps
UpperCamelCase__ :int = classifier_dropout_prob
UpperCamelCase__ :Optional[Any] = position_embedding_type
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] ) | 97 |
'''simple docstring'''
from PIL import Image
def a ( __a , __a ) -> Image:
'''simple docstring'''
def brightness(__a ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__a )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 | 1 |
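The brightness helper above maps every channel value c to 128 + level + (c - 128), which reduces to a plain additive shift by level. A self-contained sketch of the same point transform, with the clamping made explicit rather than left to PIL:
from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Shift every channel value by `level`, keeping results in the 0-255 range."""
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(lambda c: int(max(0, min(255, c + level))))


if __name__ == "__main__":
    with Image.open("image_data/lena.jpg") as img:
        change_brightness(img, 100).save("image_data/lena_brightness.png", format="png")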
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't take this task in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : i.created_at , reverse=__a )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = parent
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {}
def a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
UpperCamelCase__ :int = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = MarkupLMFeatureExtractor if is_bsa_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = MarkupLMFeatureExtractionTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.feature_extract_tester.prepare_feat_extract_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.feature_extraction_class()
# Test not batched input
UpperCamelCase__ :int = get_html_strings()[0]
UpperCamelCase__ :int = feature_extractor(UpperCamelCase_ )
# fmt: off
UpperCamelCase__ :Union[str, Any] = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
UpperCamelCase__ :Optional[Any] = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , UpperCamelCase_ )
self.assertEqual(encoding.xpaths , UpperCamelCase_ )
# Test batched
UpperCamelCase__ :str = get_html_strings()
UpperCamelCase__ :Optional[Any] = feature_extractor(UpperCamelCase_ )
# fmt: off
UpperCamelCase__ :List[str] = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
UpperCamelCase__ :int = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , UpperCamelCase_ )
self.assertEqual(encoding.xpaths , UpperCamelCase_ ) | 97 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset | 97 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=2 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=36 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=6 , UpperCamelCase_=6 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , UpperCamelCase_=1000 , ):
'''simple docstring'''
UpperCamelCase__ :List[str] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :List[Any] = num_channels
UpperCamelCase__ :int = image_size
UpperCamelCase__ :str = patch_size
UpperCamelCase__ :Optional[Any] = text_seq_length
UpperCamelCase__ :str = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Any = use_token_type_ids
UpperCamelCase__ :List[Any] = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[Any] = hidden_size
UpperCamelCase__ :Any = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ :List[Any] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :Optional[int] = type_sequence_label_size
UpperCamelCase__ :List[Any] = initializer_range
UpperCamelCase__ :Any = coordinate_size
UpperCamelCase__ :Optional[Any] = shape_size
UpperCamelCase__ :Any = num_labels
UpperCamelCase__ :Union[str, Any] = num_choices
UpperCamelCase__ :Any = scope
UpperCamelCase__ :List[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :Optional[int] = text_seq_length
UpperCamelCase__ :str = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Optional[int] = self.text_seq_length + self.image_seq_length
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :int = bbox[i, j, 3]
UpperCamelCase__ :Union[str, Any] = bbox[i, j, 1]
UpperCamelCase__ :List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :List[str] = bbox[i, j, 2]
UpperCamelCase__ :Union[str, Any] = bbox[i, j, 0]
UpperCamelCase__ :List[Any] = t
UpperCamelCase__ :Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :List[Any] = None
if self.use_input_mask:
UpperCamelCase__ :Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Any = None
if self.use_token_type_ids:
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :Union[str, Any] = None
UpperCamelCase__ :Tuple = None
if self.use_labels:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Tuple = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = LayoutLMvaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# text + image
UpperCamelCase__ :List[str] = model(UpperCamelCase_ , pixel_values=UpperCamelCase_ )
UpperCamelCase__ :int = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = model(UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
UpperCamelCase__ :str = model(UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Dict = model(UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :List[str] = model(pixel_values=UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :Tuple = LayoutLMvaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :int = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :int = LayoutLMvaForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Tuple = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = LayoutLMvaForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :int = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.prepare_config_and_inputs()
        (
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
        ) :Any = config_and_inputs
UpperCamelCase__ :Optional[int] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = False
_a = False
_a = False
_a = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_a = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
return True
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = LayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
UpperCamelCase__ :int = copy.deepcopy(UpperCamelCase_ )
if model_class in get_values(UpperCamelCase_ ):
UpperCamelCase__ :Dict = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase_ ):
UpperCamelCase__ :Optional[int] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
elif model_class in get_values(UpperCamelCase_ ):
UpperCamelCase__ :int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
UpperCamelCase__ :int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
elif model_class in [
*get_values(UpperCamelCase_ ),
]:
UpperCamelCase__ :Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
elif model_class in [
*get_values(UpperCamelCase_ ),
]:
UpperCamelCase__ :int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase_ , )
return inputs_dict
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Optional[Any] = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Optional[Any] = LayoutLMvaModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def a ( ) -> str:
'''simple docstring'''
UpperCamelCase__ :str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase_ ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.default_image_processor
UpperCamelCase__ :Optional[Any] = prepare_img()
UpperCamelCase__ :List[str] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).pixel_values.to(UpperCamelCase_ )
UpperCamelCase__ :List[str] = torch.tensor([[1, 2]] )
UpperCamelCase__ :Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
UpperCamelCase__ :str = model(
input_ids=input_ids.to(UpperCamelCase_ ) , bbox=bbox.to(UpperCamelCase_ ) , pixel_values=pixel_values.to(UpperCamelCase_ ) , )
# verify the logits
UpperCamelCase__ :Tuple = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase_ )
UpperCamelCase__ :Tuple = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ) ) | 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=18 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Any = size if size is not None else {'''height''': 18, '''width''': 18}
UpperCamelCase__ :Union[str, Any] = parent
UpperCamelCase__ :Tuple = batch_size
UpperCamelCase__ :int = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :List[str] = min_resolution
UpperCamelCase__ :Optional[int] = max_resolution
UpperCamelCase__ :Optional[Any] = do_resize
UpperCamelCase__ :Optional[int] = size
UpperCamelCase__ :List[Any] = apply_ocr
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''apply_ocr''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
UpperCamelCase__ :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , UpperCamelCase_ )
self.assertIsInstance(encoding.boxes , UpperCamelCase_ )
# Test batched
UpperCamelCase__ :int = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCamelCase__ :Tuple = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCamelCase__ :Dict = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase__ :int = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
UpperCamelCase__ :int = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
UpperCamelCase__ :str = image_processing(UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase__ :Tuple = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCamelCase__ :Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCamelCase_ )
self.assertListEqual(encoding.boxes , UpperCamelCase_ )
# with apply_OCR = False
UpperCamelCase__ :List[str] = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase_ )
UpperCamelCase__ :Tuple = image_processing(UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 97 |
'''simple docstring'''
def a ( string_a , string_b ) -> int:
'''simple docstring'''
if len(string_a ) != len(string_b ):
raise ValueError('''String lengths must match!''' )
count = 0
for chara, charb in zip(string_a , string_b ):
if chara != charb:
count += 1
return count
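# Illustrative example (values chosen here, not part of the original tests):
# a('''karolin''' , '''kathrin''') == 3, since the two strings differ at three positions.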
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = '''▁'''
__snake_case = {'''vocab_file''': '''prophetnet.tokenizer'''}
__snake_case = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
__snake_case = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
__snake_case = {
'''microsoft/xprophetnet-large-wiki100-cased''': 512,
}
def a ( __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = collections.OrderedDict()
with open(__a , '''r''' , encoding='''utf-8''' ) as reader:
UpperCamelCase__ :int = reader.readlines()
for index, token in enumerate(__a ):
UpperCamelCase__ :Tuple = token.rstrip('''\n''' )
UpperCamelCase__ :List[Any] = index
return vocab
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self , UpperCamelCase_ , UpperCamelCase_="[SEP]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[UNK]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
UpperCamelCase__ :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
UpperCamelCase__ :Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
UpperCamelCase__ :Any = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
UpperCamelCase__ :str = F'''[unused{i}]'''
UpperCamelCase__ :Dict = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
UpperCamelCase__ :str = 12
UpperCamelCase__ :int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCamelCase_ )
def __getstate__( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.__dict__.copy()
UpperCamelCase__ :Optional[Any] = None
return state
def __setstate__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ :Optional[Any] = {}
UpperCamelCase__ :Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
if token_ids_b is None:
return ([0] * len(token_ids_a )) + [1]
return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
def lowerCAmelCase__ ( self , token_ids_a , token_ids_b = None ):
'''simple docstring'''
sep = [self.sep_token_id]
if token_ids_b is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def lowerCAmelCase__ ( self ):
'''simple docstring'''
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ :Tuple = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , index ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
out_string = ''''''.join(UpperCamelCase_ ).replace('''▁''' , ''' ''' ).strip()
return out_string
def lowerCAmelCase__ ( self , save_directory , filename_prefix = None ):
'''simple docstring'''
if not os.path.isdir(save_directory ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , '''wb''' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def lowerCAmelCase__ ( self , token_ids_a , token_ids_b = None ):
'''simple docstring'''
if token_ids_b is None:
return token_ids_a + [self.sep_token_id]
sep = [self.sep_token_id]
return token_ids_a + sep + token_ids_b + sep | 97 |
'''simple docstring'''
def catalan_numbers ( upper_limit ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
catalan_list = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
catalan_list[0] = 1
if upper_limit > 0:
catalan_list[1] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
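# Illustrative values (worked out from the recurrence above, not part of the original module):
# catalan_numbers(5) == [1, 1, 2, 5, 14, 42]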
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__snake_case = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
__snake_case = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
__snake_case = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
__snake_case = sorted(arg_to_scheduler.keys())
__snake_case = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class lowercase ( pl.LightningModule ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_="base" , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCamelCase_ )
UpperCamelCase__ :Any = 0
UpperCamelCase__ :str = Path(self.hparams.output_dir )
UpperCamelCase__ :List[str] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCamelCase__ :Optional[int] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase_ , **UpperCamelCase_ , )
else:
UpperCamelCase__ :PretrainedConfig = config
UpperCamelCase__ :int = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , UpperCamelCase_ , UpperCamelCase_ ):
assert hasattr(self.config , UpperCamelCase_ ), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , UpperCamelCase_ , getattr(self.hparams , UpperCamelCase_ ) )
if tokenizer is None:
UpperCamelCase__ :Dict = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase_ , )
else:
UpperCamelCase__ :PreTrainedTokenizer = tokenizer
UpperCamelCase__ :Optional[int] = MODEL_MODES[mode]
if model is None:
UpperCamelCase__ :str = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase_ , )
else:
UpperCamelCase__ :List[Any] = model
def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.model_type.from_pretrained(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCamelCase__ :Dict = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
UpperCamelCase__ :Any = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.model
UpperCamelCase__ :Optional[Any] = ['''bias''', '''LayerNorm.weight''']
UpperCamelCase__ :Optional[Any] = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
UpperCamelCase__ :List[str] = Adafactor(
UpperCamelCase_ , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase_ , relative_step=UpperCamelCase_ )
else:
UpperCamelCase__ :int = AdamW(
UpperCamelCase_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
UpperCamelCase__ :str = optimizer
UpperCamelCase__ :int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
return self.validation_step(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.validation_end(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCamelCase__ :List[str] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if stage == "test":
UpperCamelCase__ :Union[str, Any] = len(self.test_dataloader().dataset )
else:
UpperCamelCase__ :List[Any] = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = len(self.train_dataloader().dataset )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False ):
'''simple docstring'''
raise NotImplementedError('''You must implement this for your task''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.train_loader
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
UpperCamelCase_ , list(filter(UpperCamelCase_ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.output_dir.joinpath('''best_tfmr''' )
UpperCamelCase__ :int = self.step_count
self.model.save_pretrained(UpperCamelCase_ )
self.tokenizer.save_pretrained(UpperCamelCase_ )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
parser.add_argument(
'''--model_name_or_path''' , default=UpperCamelCase_ , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=UpperCamelCase_ , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=UpperCamelCase_ , type=UpperCamelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(UpperCamelCase_ ).parent / '''test_run''' / '''cache''' ) , type=UpperCamelCase_ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=UpperCamelCase_ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=UpperCamelCase_ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=UpperCamelCase_ , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=UpperCamelCase_ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=UpperCamelCase_ , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=UpperCamelCase_ , metavar=UpperCamelCase_ , type=UpperCamelCase_ , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=UpperCamelCase_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=UpperCamelCase_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=UpperCamelCase_ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=UpperCamelCase_ , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=UpperCamelCase_ )
parser.add_argument('''--train_batch_size''' , default=32 , type=UpperCamelCase_ )
parser.add_argument('''--eval_batch_size''' , default=32 , type=UpperCamelCase_ )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class lowercase ( pl.Callback ):
"""simple docstring"""
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowercase ( pl.Callback ):
"""simple docstring"""
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(UpperCamelCase_ )
class lowercase ( pl.Callback ):
"""simple docstring"""
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = trainer.lr_schedulers[0]['''scheduler''']
UpperCamelCase__ :Dict = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
rank_zero_info('''***** Validation results *****''' )
UpperCamelCase__ :Dict = trainer.callback_metrics
# Log results
for key in sorted(UpperCamelCase_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(UpperCamelCase_ , str(metrics[key] ) ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
rank_zero_info('''***** Test results *****''' )
UpperCamelCase__ :int = trainer.callback_metrics
# Log and save results to file
UpperCamelCase__ :Any = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(UpperCamelCase_ , '''w''' ) as writer:
for key in sorted(UpperCamelCase_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(UpperCamelCase_ , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(UpperCamelCase_ , str(metrics[key] ) ) )
def a ( __a , __a ) -> None:
'''simple docstring'''
parser.add_argument(
'''--output_dir''' , default=str(Path(__a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=__a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__a , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=__a )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=__a , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=__a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=__a , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(__a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=__a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def a ( __a , __a , __a=None , __a=True , __a=[] , __a=None , __a=None , **__a , ) -> str:
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
UpperCamelCase__ :Union[str, Any] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__a )
# add custom checkpoints
if checkpoint_callback is None:
UpperCamelCase__ :Any = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__a )
if logging_callback is None:
UpperCamelCase__ :str = LoggingCallback()
UpperCamelCase__ :Optional[Any] = {}
if args.fpaa:
UpperCamelCase__ :List[Any] = 16
if args.gpus > 1:
UpperCamelCase__ :Union[str, Any] = '''auto'''
UpperCamelCase__ :Optional[Any] = '''ddp'''
UpperCamelCase__ :List[str] = args.accumulate_grad_batches
UpperCamelCase__ :Dict = None
UpperCamelCase__ :Optional[Any] = '''auto'''
UpperCamelCase__ :List[str] = pl.Trainer.from_argparse_args(
__a , weights_summary=__a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__a , val_check_interval=1 , num_sanity_val_steps=2 , **__a , )
if args.do_train:
trainer.fit(__a )
else:
print('''RAG modeling tests with new set functions successfuly executed!''' )
return trainer | 97 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
return [json.loads(__a ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor | 97 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase :
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
UpperCamelCase__ :int = [2, 1, 2, -1]
UpperCamelCase__ :Tuple = [1, 2, 3, 4]
def lowerCAmelCase__ ( self ):
'''simple docstring'''
length_first_signal = len(self.first_signal )
length_second_signal = len(self.second_signal )
max_length = max(length_first_signal , length_second_signal )
# create a zero matrix of max_length x max_length
matrix = [[0] * max_length for i in range(max_length )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(max_length ):
rotated_signal = deque(self.second_signal )
rotated_signal.rotate(i )
for j, item in enumerate(rotated_signal ):
matrix[i][j] += item
# multiply the matrix with the first signal
final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(i , 2 ) for i in final_signal]
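# Illustrative result (worked out from the definition, not part of the original module):
# the circular convolution of the default signals [2, 1, 2, -1] and [1, 2, 3, 4] is [10, 10, 6, 14].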
if __name__ == "__main__":
doctest.testmod() | 97 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> int:
'''simple docstring'''
for param in __a.parameters():
param.requires_grad = False
def a ( ) -> Union[str, Any]:
'''simple docstring'''
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
device = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( __a ) -> Any:
'''simple docstring'''
fig = plt.imshow(__a )
fig.axes.get_xaxis().set_visible(False )
fig.axes.get_yaxis().set_visible(False )
plt.show()
def a ( ) -> str:
'''simple docstring'''
current_time = datetime.now()
timestamp = current_time.strftime('''%H:%M:%S''' )
return timestamp | 97 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class lowercase ( A__ , A__ ):
"""simple docstring"""
_a = 'resnet'
_a = ['basic', 'bottleneck']
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=[256, 512, 1024, 2048] , UpperCamelCase_=[3, 4, 6, 3] , UpperCamelCase_="bottleneck" , UpperCamelCase_="relu" , UpperCamelCase_=False , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
UpperCamelCase__ :Optional[Any] = num_channels
UpperCamelCase__ :int = embedding_size
UpperCamelCase__ :Dict = hidden_sizes
UpperCamelCase__ :int = depths
UpperCamelCase__ :int = layer_type
UpperCamelCase__ :List[Any] = hidden_act
UpperCamelCase__ :Optional[int] = downsample_in_first_stage
UpperCamelCase__ :Tuple = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :Tuple = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
class lowercase ( A__ ):
"""simple docstring"""
_a = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return 1e-3 | 97 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
    predictions (`list` of `float`): Predicted values, as returned by a model.
    references (`list` of `float`): Ground truth values.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )} | 97 | 1 |
'''simple docstring'''
from __future__ import annotations
def a ( __a , __a ) -> set[str]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = set(__a ), [start]
while stack:
UpperCamelCase__ :Union[str, Any] = stack.pop()
explored.add(__a )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__a )
return explored
__snake_case = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A''')) | 97 |
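# Hedged illustration (added): the breadth-first counterpart of the search above, to make the
# "differences from BFS" comments concrete -- BFS pops from the front of a queue and marks
# nodes when they are enqueued. The names `breadth_first_search` and `demo_graph` are
# assumptions introduced only for this sketch.
from collections import deque
def breadth_first_search(graph, start):
    '''Return the set of vertices reachable from start, visited in BFS order.'''
    explored = {start}
    queue = deque([start])
    while queue:
        v = queue.popleft()  # pop first element instead of last
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)  # mark before enqueuing, unlike the DFS above
                queue.append(adj)
    return explored
demo_graph = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
assert breadth_first_search(demo_graph, "A") == {"A", "B", "C", "D"}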
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 97 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = 10
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = [1, 2, 3, 4]
UpperCamelCase__ :Tuple = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCamelCase__ :Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCamelCase__ :Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCamelCase__ , UpperCamelCase__ :int = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = ''''''
UpperCamelCase__ , UpperCamelCase__ :List[str] = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCamelCase__ , UpperCamelCase__ :Tuple = process_story(UpperCamelCase_ )
UpperCamelCase__ :Any = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :str = ['''It was the best of times.''']
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = torch.tensor([1, 2, 3, 4] )
UpperCamelCase__ :Optional[int] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCamelCase__ :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCamelCase__ :Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = 101
UpperCamelCase__ :List[str] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCamelCase__ :Tuple = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCamelCase__ :Any = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ ) | 97 |
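# Hedged sketch (added): minimal implementations consistent with the expectations encoded in
# the tests above; the real utils_summarization helpers may be written differently. The
# "_sketch" names and everything else here are assumptions introduced only for illustration.
import torch
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    # truncate to block_size, or right-pad with pad_token_id up to block_size
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))
def build_mask_sketch(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the padding token appears
    return (sequence != pad_token_id).to(torch.long)
def compute_token_type_ids_sketch(batch, separator_token_id):
    # alternate segment ids (starting at 1), flipping each time the separator token is seen
    token_type_ids = []
    for sequence in batch.tolist():
        current, ids = 1, []
        for token in sequence:
            if token == separator_token_id:
                current = 1 - current
            ids.append(current)
        token_type_ids.append(ids)
    return torch.tensor(token_type_ids)
assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert build_mask_sketch(torch.tensor([8, 2, 3, 4, 1, 1, 1]), 1).tolist() == [1, 1, 1, 1, 0, 0, 0]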
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class lowercase ( unittest.TestCase ):
"""simple docstring"""
_a = StableDiffusionLDMaDPipeline
_a = TEXT_TO_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCamelCase__ :Tuple = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
UpperCamelCase__ :Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ :Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase__ :Optional[int] = CLIPTextModel(UpperCamelCase_ )
UpperCamelCase__ :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ :Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('''mps''' ):
UpperCamelCase__ :Dict = torch.manual_seed(UpperCamelCase_ )
else:
UpperCamelCase__ :List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCamelCase__ :List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :List[Any] = self.get_dummy_components()
UpperCamelCase__ :Any = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
UpperCamelCase__ :str = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :int = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :str = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = output.rgb, output.depth
UpperCamelCase__ :List[Any] = rgb[0, -3:, -3:, -1]
UpperCamelCase__ :Optional[int] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
UpperCamelCase__ :Any = np.array(
[0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
UpperCamelCase__ :Optional[int] = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_dummy_components()
UpperCamelCase__ :Optional[Any] = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
UpperCamelCase__ :Dict = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :str = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = 3 * [inputs['''prompt''']]
# forward
UpperCamelCase__ :int = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = output.rgb, output.depth
UpperCamelCase__ :Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1]
UpperCamelCase__ :Union[str, Any] = depth_slice_a[0, -3:, -1]
UpperCamelCase__ :Dict = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = 3 * [inputs.pop('''prompt''' )]
UpperCamelCase__ :str = ldmad_pipe.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :Optional[Any] = text_inputs['''input_ids'''].to(UpperCamelCase_ )
UpperCamelCase__ :str = ldmad_pipe.text_encoder(UpperCamelCase_ )[0]
UpperCamelCase__ :Optional[int] = prompt_embeds
# forward
UpperCamelCase__ :Tuple = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :int = output.rgb, output.depth
UpperCamelCase__ :List[Any] = rgb_slice_a[0, -3:, -3:, -1]
UpperCamelCase__ :List[Any] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :List[str] = self.get_dummy_components()
UpperCamelCase__ :Tuple = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
UpperCamelCase__ :List[str] = StableDiffusionLDMaDPipeline(**UpperCamelCase_ )
UpperCamelCase__ :int = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = '''french fries'''
UpperCamelCase__ :List[Any] = ldmad_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :List[Any] = output.rgb, output.depth
UpperCamelCase__ :Tuple = rgb[0, -3:, -3:, -1]
UpperCamelCase__ :Any = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
UpperCamelCase__ :Union[str, Any] = np.array(
[0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
UpperCamelCase__ :List[str] = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_="cpu" , UpperCamelCase_=torch.floataa , UpperCamelCase_=0 ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCamelCase__ :int = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 4, 64, 64) )
UpperCamelCase__ :Any = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
UpperCamelCase__ :int = ldmad_pipe.to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = self.get_inputs(UpperCamelCase_ )
UpperCamelCase__ :Tuple = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :List[str] = output.rgb, output.depth
UpperCamelCase__ :List[str] = rgb[0, -3:, -3:, -1].flatten()
UpperCamelCase__ :Optional[int] = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
UpperCamelCase__ :List[str] = np.array(
[0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
UpperCamelCase__ :Optional[int] = np.array(
[0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_="cpu" , UpperCamelCase_=torch.floataa , UpperCamelCase_=0 ):
'''simple docstring'''
UpperCamelCase__ :List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCamelCase__ :List[str] = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 4, 64, 64) )
UpperCamelCase__ :Any = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
UpperCamelCase__ :List[str] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Any = self.get_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = output.rgb, output.depth
UpperCamelCase__ :Union[str, Any] = 0.495586
UpperCamelCase__ :List[str] = 0.33795515
UpperCamelCase__ :Tuple = 112.48518
UpperCamelCase__ :int = 98.489746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(UpperCamelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Dict = self.get_inputs(UpperCamelCase_ )
UpperCamelCase__ :Any = ldmad_pipe(**UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Dict = output.rgb, output.depth
UpperCamelCase__ :int = 0.4194127
UpperCamelCase__ :Optional[int] = 0.35375586
UpperCamelCase__ :int = 0.5638502
UpperCamelCase__ :Optional[int] = 0.34686103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3 | 97 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset | 97 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ :int = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
UpperCamelCase__ :Tuple = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :Union[str, Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
UpperCamelCase__ :Tuple = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
UpperCamelCase__ :List[Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
UpperCamelCase__ :str = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase__ :int = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.get_tokenizer()
UpperCamelCase__ :str = self.get_rust_tokenizer()
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :Dict = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ :str = self.get_image_processor(do_normalize=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.get_image_processor()
UpperCamelCase__ :Tuple = self.get_tokenizer()
UpperCamelCase__ :Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.prepare_image_inputs()
UpperCamelCase__ :Optional[int] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
UpperCamelCase__ :str = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.get_image_processor()
UpperCamelCase__ :Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ :List[str] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Any = '''lower newer'''
UpperCamelCase__ :List[str] = processor(text=UpperCamelCase_ , return_tensors='''np''' )
UpperCamelCase__ :Union[str, Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.get_image_processor()
UpperCamelCase__ :Optional[Any] = self.get_tokenizer()
UpperCamelCase__ :List[Any] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :int = '''lower newer'''
UpperCamelCase__ :List[str] = self.prepare_image_inputs()
UpperCamelCase__ :Optional[int] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = '''google/owlvit-base-patch32'''
UpperCamelCase__ :Any = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = ['''cat''', '''nasa badge''']
UpperCamelCase__ :List[Any] = processor(text=UpperCamelCase_ )
UpperCamelCase__ :List[str] = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = '''google/owlvit-base-patch32'''
UpperCamelCase__ :Optional[Any] = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Any = [['''cat''', '''nasa badge'''], ['''person''']]
UpperCamelCase__ :List[str] = processor(text=UpperCamelCase_ )
UpperCamelCase__ :int = 16
UpperCamelCase__ :Optional[int] = len(UpperCamelCase_ )
UpperCamelCase__ :int = max([len(UpperCamelCase_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = '''google/owlvit-base-patch32'''
UpperCamelCase__ :List[Any] = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :str = ['''cat''', '''nasa badge''']
UpperCamelCase__ :Union[str, Any] = processor(text=UpperCamelCase_ )
UpperCamelCase__ :Dict = 16
UpperCamelCase__ :str = inputs['''input_ids''']
UpperCamelCase__ :Union[str, Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.get_image_processor()
UpperCamelCase__ :Any = self.get_tokenizer()
UpperCamelCase__ :List[str] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ :Union[str, Any] = self.prepare_image_inputs()
UpperCamelCase__ :Any = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.get_image_processor()
UpperCamelCase__ :Dict = self.get_tokenizer()
UpperCamelCase__ :Dict = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ :Tuple = processor.batch_decode(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) | 97 |
'''simple docstring'''
__snake_case = 65521
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
return (b << 16) | a | 97 | 1 |
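# Hedged usage sketch (added): a de-obfuscated, self-contained version of the checksum above,
# cross-checked against zlib.adler32. The name `adler32_sketch` is an assumption; the logic
# mirrors the function defined above (a starts at 1, b at 0, both reduced modulo 65521).
import zlib
def adler32_sketch(plain_text: str) -> int:
    mod_adler = 65521
    a, b = 1, 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % mod_adler
        b = (b + a) % mod_adler
    return (b << 16) | a
assert adler32_sketch("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398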
'''simple docstring'''
__snake_case = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor (meter)
__snake_case = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def a ( __a , __a , __a ) -> float:
'''simple docstring'''
UpperCamelCase__ :int = from_type.lower().strip('''s''' )
UpperCamelCase__ :Tuple = to_type.lower().strip('''s''' )
UpperCamelCase__ :str = UNIT_SYMBOL.get(__a , __a )
UpperCamelCase__ :Dict = UNIT_SYMBOL.get(__a , __a )
if from_sanitized not in METRIC_CONVERSION:
UpperCamelCase__ :Dict = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {", ".join(__a )}'''
)
raise ValueError(__a )
if to_sanitized not in METRIC_CONVERSION:
UpperCamelCase__ :int = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {", ".join(__a )}'''
)
raise ValueError(__a )
UpperCamelCase__ :Optional[int] = METRIC_CONVERSION[from_sanitized]
UpperCamelCase__ :Any = METRIC_CONVERSION[to_sanitized]
UpperCamelCase__ :Union[str, Any] = 1
if from_exponent > to_exponent:
UpperCamelCase__ :List[str] = from_exponent - to_exponent
else:
UpperCamelCase__ :Dict = -(to_exponent - from_exponent)
return value * pow(10 , __a )
if __name__ == "__main__":
from doctest import testmod
testmod() | 97 |
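# Hedged usage sketch (added): a de-obfuscated, self-contained version of the conversion above,
# showing how the difference between the two unit exponents drives the power-of-ten scaling.
# The name `length_conversion_sketch` is an assumption introduced only for this illustration.
def length_conversion_sketch(value: float, from_type: str, to_type: str) -> float:
    exponents = {"m": 0, "km": 3, "Mm": 6, "Gm": 9, "Tm": 12, "Pm": 15, "Em": 18, "Zm": 21, "Ym": 24}
    symbols = {"meter": "m", "kilometer": "km", "megametre": "Mm", "gigametre": "Gm",
               "terametre": "Tm", "petametre": "Pm", "exametre": "Em", "zettametre": "Zm",
               "yottametre": "Ym"}
    from_symbol = symbols.get(from_type.lower().strip("s"), from_type.lower().strip("s"))
    to_symbol = symbols.get(to_type.lower().strip("s"), to_type.lower().strip("s"))
    # a positive exponent difference scales the value up, a negative one scales it down
    exponent = exponents[from_symbol] - exponents[to_symbol]
    return value * pow(10, exponent)
assert round(length_conversion_sketch(4, "meter", "kilometer"), 6) == 0.004
assert length_conversion_sketch(1, "kilometers", "meter") == 1000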
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 1 |
'''simple docstring'''
__snake_case = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
} | 97 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = np.random.default_rng(UpperCamelCase_ )
UpperCamelCase__ :Tuple = length
UpperCamelCase__ :Optional[int] = rng.normal(size=(length,) ).astype(np.floataa )
UpperCamelCase__ :str = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ):
'''simple docstring'''
return self.length
def __getitem__( self , UpperCamelCase_ ):
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCamelCase__ :List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCamelCase__ :Tuple = True
def lowerCAmelCase__ ( self , UpperCamelCase_=None ):
'''simple docstring'''
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCamelCase__ :Tuple = False
return x * self.a[0] + self.b[0]
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :List[str] = torch.nn.Parameter(torch.tensor(UpperCamelCase_ ).float() )
UpperCamelCase__ :List[Any] = torch.nn.Parameter(torch.tensor(UpperCamelCase_ ).float() )
UpperCamelCase__ :str = True
def lowerCAmelCase__ ( self , UpperCamelCase_=None ):
'''simple docstring'''
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCamelCase__ :Optional[int] = False
return x * self.a + self.b
def a ( __a , __a = 16 ) -> Union[str, Any]:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCamelCase__ :Any = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCamelCase__ :Tuple = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
UpperCamelCase__ :Dict = load_dataset('''csv''' , data_files=__a )
UpperCamelCase__ :int = datasets['''train'''].unique('''label''' )
UpperCamelCase__ :List[str] = {v: i for i, v in enumerate(__a )}
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :Any = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a , padding='''max_length''' )
if "label" in examples:
UpperCamelCase__ :str = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ :str = datasets.map(
__a , batched=__a , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCamelCase__ :int = DataLoader(tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=2 )
UpperCamelCase__ :Dict = DataLoader(tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=1 )
return train_dataloader, eval_dataloader | 97 |
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
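# e.g. with 3 persons the final mask is 0b111 == 7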
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't use this task in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
'''simple docstring'''
def a ( __a , __a ) -> float:
'''simple docstring'''
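# returns only the fractional part of the number, rounded to the requested number of places when digit_amount > 0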
if digit_amount > 0:
return round(number - int(__a ) , __a )
return number - int(__a )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3)) | 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__snake_case = logging.get_logger(__name__)
def a ( __a , __a , __a ) -> None:
'''simple docstring'''
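# copy the selected teacher layers into the student's layers; both lists must contain the same number of layers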
UpperCamelCase__ :Dict = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__a ) == len(__a ), f'''{len(__a )} != {len(__a )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__snake_case = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__snake_case = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a ( __a , __a ) -> Any:
'''simple docstring'''
try:
UpperCamelCase__ :List[str] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
f''' {n_student}''' )
return list(range(__a ) )
def a ( __a , __a ) -> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(__a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a ( __a , __a = "student" , __a = None , __a = None , __a=False , __a=None , __a=None , **__a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = '''encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(__a , __a ):
AutoTokenizer.from_pretrained(__a ).save_pretrained(__a ) # purely for convenience
UpperCamelCase__ :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__a ).eval()
else:
assert isinstance(__a , __a ), f'''teacher must be a model or string got type {type(__a )}'''
UpperCamelCase__ :Optional[int] = teacher.config.to_diff_dict()
try:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCamelCase__ :Dict = teacher_e
if d is None:
UpperCamelCase__ :Tuple = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
UpperCamelCase__ , UpperCamelCase__ :Dict = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCamelCase__ , UpperCamelCase__ :int = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCamelCase__ :List[str] = teacher_e
if d is None:
UpperCamelCase__ :Dict = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__a )
# Copy weights
UpperCamelCase__ :Union[str, Any] = teacher.config_class(**__a )
UpperCamelCase__ :str = AutoModelForSeqaSeqLM.from_config(__a )
# Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
UpperCamelCase__ :Tuple = student.load_state_dict(teacher.state_dict() , strict=__a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCamelCase__ , UpperCamelCase__ :Any = list(range(__a ) ), list(range(__a ) )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
f''' {save_path}''' )
student.save_pretrained(__a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCamelCase__ :List[int] = pick_layers_to_copy(__a , __a )
if d_layers_to_copy is None:
UpperCamelCase__ :List[int] = pick_layers_to_copy(__a , __a )
try:
if hasattr(
__a , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __a )
copy_layers(teacher.decoder.block , student.decoder.block , __a )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
UpperCamelCase__ :Union[str, Any] = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(__a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
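# A hypothetical invocation via fire (the file name, teacher model id and layer counts below are illustrative only):
#   python make_student.py facebook/bart-large-cnn ./student_bart_3_1 --e 3 --d 1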
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 97 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
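# the fused in_proj matrix stacks q, k and v along dim 0; the hidden size is 256, so each projection gets a 256-row slice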
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
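# A hypothetical invocation (the script file name and dump folder are illustrative; the checkpoint URL is one of the two accepted below):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection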
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__snake_case = pd.read_csv('''sample_data.csv''', header=None)
__snake_case = df.shape[:1][0]
# If you're using some other dataset, input the target column
__snake_case = df.iloc[:, 1:2]
__snake_case = actual_data.values.reshape(len_data, 1)
__snake_case = MinMaxScaler().fit_transform(actual_data)
__snake_case = 10
__snake_case = 5
__snake_case = 20
__snake_case = len_data - periods * look_back
__snake_case = actual_data[:division]
__snake_case = actual_data[division - look_back :]
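# the test slice starts look_back rows before the split point so that its first window still has a full history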
__snake_case , __snake_case = [], []
__snake_case , __snake_case = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__snake_case = np.array(train_x)
__snake_case = np.array(test_x)
__snake_case = np.array([list(i.ravel()) for i in train_y])
__snake_case = np.array([list(i.ravel()) for i in test_y])
__snake_case = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
__snake_case = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
__snake_case = model.predict(x_test) | 97 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
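# add the three fractions x_num/x_den + y_num/y_den + z_num/z_den and return the reduced (numerator, denominator)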
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__snake_case = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__snake_case = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__snake_case = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ , sample_weight=UpperCamelCase_ ) ),
} | 97 |
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
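# concatenate the positive integers into one long digit string and multiply the digits at positions 1, 10, 100, ..., 1_000_000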
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
UpperCamelCase__ :Union[str, Any] = ''''''.join(__a )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__snake_case = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__snake_case = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
__snake_case = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = CHRF.CHAR_ORDER , UpperCamelCase_ = CHRF.WORD_ORDER , UpperCamelCase_ = CHRF.BETA , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , ):
'''simple docstring'''
UpperCamelCase__ :List[str] = len(references[0] )
if any(len(UpperCamelCase_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
UpperCamelCase__ :Optional[Any] = [[refs[i] for refs in references] for i in range(UpperCamelCase_ )]
UpperCamelCase__ :str = CHRF(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :int = sb_chrf.corpus_score(UpperCamelCase_ , UpperCamelCase_ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
} | 97 |
'''simple docstring'''
from PIL import Image
def a ( __a , __a ) -> Image:
'''simple docstring'''
def brightness(__a ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__a )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 | 1 |
'''simple docstring'''
import pytest
__snake_case = '''__dummy_dataset1__'''
__snake_case = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a ( ) -> List[str]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ) -> List[Any]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = dataset_loading_script_name
UpperCamelCase__ :Optional[int] = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__a )
UpperCamelCase__ :List[str] = script_dir / f'''{script_name}.py'''
with open(__a , '''w''' ) as f:
f.write(__a )
return str(__a ) | 97 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
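# issues carrying any of the labels above are never auto-closed or marked as stale by this script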
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : i.created_at , reverse=__a )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 1 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
if isinstance(__a , torch.Tensor ):
return image
elif isinstance(__a , PIL.Image.Image ):
UpperCamelCase__ :Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCamelCase__ :str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
UpperCamelCase__ :Optional[int] = np.concatenate(__a , axis=0 )
UpperCamelCase__ :List[str] = np.array(__a ).astype(np.floataa ) / 2_5_5.0
UpperCamelCase__ :int = image.transpose(0 , 3 , 1 , 2 )
UpperCamelCase__ :List[str] = 2.0 * image - 1.0
UpperCamelCase__ :int = torch.from_numpy(__a )
elif isinstance(image[0] , torch.Tensor ):
UpperCamelCase__ :int = torch.cat(__a , dim=0 )
return image
def a ( __a , __a , __a , __a=0.9_9_9_5 ) -> Dict:
'''simple docstring'''
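# spherical linear interpolation (slerp) between two latents; falls back to plain linear interpolation when the vectors are nearly parallel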
if not isinstance(__a , np.ndarray ):
UpperCamelCase__ :str = True
UpperCamelCase__ :Optional[int] = va.device
UpperCamelCase__ :List[str] = va.cpu().numpy()
UpperCamelCase__ :int = va.cpu().numpy()
UpperCamelCase__ :List[str] = np.sum(va * va / (np.linalg.norm(__a ) * np.linalg.norm(__a )) )
if np.abs(__a ) > DOT_THRESHOLD:
UpperCamelCase__ :List[str] = (1 - t) * va + t * va
else:
UpperCamelCase__ :Union[str, Any] = np.arccos(__a )
UpperCamelCase__ :Optional[int] = np.sin(__a )
UpperCamelCase__ :Dict = theta_a * t
UpperCamelCase__ :Optional[Any] = np.sin(__a )
UpperCamelCase__ :Union[str, Any] = np.sin(theta_a - theta_t ) / sin_theta_a
UpperCamelCase__ :str = sin_theta_t / sin_theta_a
UpperCamelCase__ :Optional[int] = sa * va + sa * va
if inputs_are_torch:
UpperCamelCase__ :List[str] = torch.from_numpy(__a ).to(__a )
return va
def a ( __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :str = F.normalize(__a , dim=-1 )
UpperCamelCase__ :Any = F.normalize(__a , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def a ( __a , __a ) -> Any:
'''simple docstring'''
for param in model.parameters():
UpperCamelCase__ :Any = value
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , clip_model=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , coca_model=UpperCamelCase_ , coca_tokenizer=UpperCamelCase_ , coca_transform=UpperCamelCase_ , )
UpperCamelCase__ :List[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , UpperCamelCase_ )
else feature_extractor.size['''shortest_edge''']
)
UpperCamelCase__ :List[str] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , UpperCamelCase_ )
set_requires_grad(self.clip_model , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase__ :Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
set_requires_grad(self.vae , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
set_requires_grad(self.vae , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
set_requires_grad(self.unet , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
set_requires_grad(self.unet , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
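# img2img-style schedule truncation: keep only the last `strength` fraction of the denoising timesteps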
UpperCamelCase__ :int = min(int(num_inference_steps * strength ) , UpperCamelCase_ )
UpperCamelCase__ :Dict = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase__ :int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase_ )}''' )
UpperCamelCase__ :List[str] = image.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase_ )
]
UpperCamelCase__ :str = torch.cat(UpperCamelCase_ , dim=0 )
else:
UpperCamelCase__ :Tuple = self.vae.encode(UpperCamelCase_ ).latent_dist.sample(UpperCamelCase_ )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase__ :str = 0.18215 * init_latents
UpperCamelCase__ :Optional[Any] = init_latents.repeat_interleave(UpperCamelCase_ , dim=0 )
UpperCamelCase__ :Any = randn_tensor(init_latents.shape , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
# get latents
UpperCamelCase__ :Tuple = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = init_latents
return latents
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = self.coca_transform(UpperCamelCase_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCamelCase__ :List[str] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCamelCase__ :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.feature_extractor.preprocess(UpperCamelCase_ )
UpperCamelCase__ :Any = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCamelCase__ :str = self.clip_model.get_image_features(UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = image_embeddings_clip.repeat_interleave(UpperCamelCase_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
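# CLIP-guidance step: estimate the clean image from the current latents, compare its CLIP embedding with the target embedding, and nudge the noise prediction with the gradient of that distance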
UpperCamelCase__ :List[str] = latents.detach().requires_grad_()
UpperCamelCase__ :List[Any] = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
UpperCamelCase__ :List[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCamelCase__ :Union[str, Any] = self.scheduler.alphas_cumprod[timestep]
UpperCamelCase__ :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase__ :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCamelCase__ :Any = torch.sqrt(UpperCamelCase_ )
UpperCamelCase__ :Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCamelCase_ ):
UpperCamelCase__ :Any = self.scheduler.sigmas[index]
UpperCamelCase__ :Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase__ :Dict = 1 / 0.18215 * sample
UpperCamelCase__ :List[Any] = self.vae.decode(UpperCamelCase_ ).sample
UpperCamelCase__ :Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ :List[Any] = transforms.Resize(self.feature_extractor_size )(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.normalize(UpperCamelCase_ ).to(latents.dtype )
UpperCamelCase__ :Union[str, Any] = self.clip_model.get_image_features(UpperCamelCase_ )
UpperCamelCase__ :Any = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase_ )
UpperCamelCase__ :Any = spherical_dist_loss(UpperCamelCase_ , UpperCamelCase_ ).mean() * clip_guidance_scale
UpperCamelCase__ :str = -torch.autograd.grad(UpperCamelCase_ , UpperCamelCase_ )[0]
if isinstance(self.scheduler , UpperCamelCase_ ):
UpperCamelCase__ :List[Any] = latents.detach() + grads * (sigma**2)
UpperCamelCase__ :Optional[int] = noise_pred_original
else:
UpperCamelCase__ :Tuple = noise_pred_original - torch.sqrt(UpperCamelCase_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 512 , UpperCamelCase_ = 512 , UpperCamelCase_ = 0.6 , UpperCamelCase_ = 50 , UpperCamelCase_ = 7.5 , UpperCamelCase_ = 1 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = 100 , UpperCamelCase_ = None , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , UpperCamelCase_ = 0.8 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , ):
'''simple docstring'''
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(UpperCamelCase_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(UpperCamelCase_ , torch.Generator ) and batch_size > 1:
UpperCamelCase__ :List[Any] = [generator] + [None] * (batch_size - 1)
UpperCamelCase__ :Union[str, Any] = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCamelCase__ :Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase__ :Union[str, Any] = ''', '''.join(UpperCamelCase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCamelCase_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase__ :List[str] = self.get_image_description(UpperCamelCase_ )
if style_prompt is None:
if len(UpperCamelCase_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase__ :List[Any] = self.get_image_description(UpperCamelCase_ )
# get prompt text embeddings for content and style
UpperCamelCase__ :Union[str, Any] = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :Any = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase__ :Any = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase__ :str = slerp(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase__ :List[str] = text_embeddings.repeat_interleave(UpperCamelCase_ , dim=0 )
# set timesteps
UpperCamelCase__ :Dict = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase__ :Any = {}
if accepts_offset:
UpperCamelCase__ :Tuple = 1
self.scheduler.set_timesteps(UpperCamelCase_ , **UpperCamelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCamelCase__ , UpperCamelCase__ :int = self.get_timesteps(UpperCamelCase_ , UpperCamelCase_ , self.device )
UpperCamelCase__ :Optional[Any] = timesteps[:1].repeat(UpperCamelCase_ )
# Preprocess image
UpperCamelCase__ :Any = preprocess(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = self.prepare_latents(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text_embeddings.dtype , self.device , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = preprocess(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = self.prepare_latents(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text_embeddings.dtype , self.device , UpperCamelCase_ )
UpperCamelCase__ :int = slerp(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if clip_guidance_scale > 0:
UpperCamelCase__ :Optional[Any] = self.get_clip_image_embeddings(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.get_clip_image_embeddings(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = slerp(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase__ :Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ :str = content_text_input.input_ids.shape[-1]
UpperCamelCase__ :List[str] = self.tokenizer([''''''] , padding='''max_length''' , max_length=UpperCamelCase_ , return_tensors='''pt''' )
UpperCamelCase__ :Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase__ :Optional[Any] = uncond_embeddings.repeat_interleave(UpperCamelCase_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ :Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase__ :Optional[int] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase__ :Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase__ :Any = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device='''cpu''' , dtype=UpperCamelCase_ ).to(
self.device )
else:
UpperCamelCase__ :int = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase__ :Union[str, Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ :Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase__ :Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ :Any = {}
if accepts_eta:
UpperCamelCase__ :Union[str, Any] = eta
# check if the scheduler accepts generator
UpperCamelCase__ :int = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase__ :List[Any] = generator
with self.progress_bar(total=UpperCamelCase_ ):
for i, t in enumerate(UpperCamelCase_ ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ :str = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
UpperCamelCase__ :str = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ :List[str] = noise_pred.chunk(2 )
UpperCamelCase__ :Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase__ :Dict = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.cond_fn(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ :Dict = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase__ :Any = 1 / 0.18215 * latents
UpperCamelCase__ :Union[str, Any] = self.vae.decode(UpperCamelCase_ ).sample
UpperCamelCase__ :Any = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ :Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ :Tuple = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCamelCase_ , nsfw_content_detected=UpperCamelCase_ ) | 97 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
__a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char (keep the result so the substitution actually takes effect)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__snake_case = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
__snake_case = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__snake_case = '''▁'''
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
UpperCamelCase__ :Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
UpperCamelCase__ :List[str] = vocab_file
UpperCamelCase__ :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
UpperCamelCase__ :Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
UpperCamelCase__ :Optional[int] = len(self.sp_model ) - 1
UpperCamelCase__ :Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ :Optional[int] = [self.cls_token_id]
UpperCamelCase__ :List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = [self.sep_token_id]
UpperCamelCase__ :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ :List[Any] = self.sp_model.PieceToId(UpperCamelCase_ )
return spm_id if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = []
UpperCamelCase__ :Optional[int] = ''''''
UpperCamelCase__ :str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :Any = []
else:
current_sub_tokens.append(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def __getstate__( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.__dict__.copy()
UpperCamelCase__ :str = None
return state
def __setstate__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ :List[str] = {}
UpperCamelCase__ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ :Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
UpperCamelCase__ :Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,) | 97 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''The `datasets` package is required: run `pip install datasets`.''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
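# Illustrative CLI usage via python-fire (assumed invocation; the actual script filename is not
# shown in this file):
# python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en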
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__snake_case = '''scheduler_config.json'''
class lowercase ( A__ ):
"""simple docstring"""
_a = 1
_a = 2
_a = 3
_a = 4
_a = 5
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = 42
class lowercase :
"""simple docstring"""
_a = SCHEDULER_CONFIG_NAME
_a = ['dtype']
_a = []
_a = True
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=False , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase_ , subfolder=UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = cls.from_config(UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ , **UpperCamelCase_ )
if hasattr(UpperCamelCase_ , '''create_state''' ) and getattr(UpperCamelCase_ , '''has_state''' , UpperCamelCase_ ):
UpperCamelCase__ :str = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = False , **UpperCamelCase_ ):
'''simple docstring'''
self.save_config(save_directory=UpperCamelCase_ , push_to_hub=UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase__ :Union[str, Any] = importlib.import_module(__name__.split('''.''' )[0] )
UpperCamelCase__ :int = [
getattr(UpperCamelCase_ , UpperCamelCase_ ) for c in compatible_classes_str if hasattr(UpperCamelCase_ , UpperCamelCase_ )
]
return compatible_classes
def a ( __a , __a ) -> jnp.ndarray:
'''simple docstring'''
assert len(__a ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__a ) - x.ndim) ) , __a )
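# Illustrative note (not in the original file): a per-timestep scalar of shape (batch,) is first
# reshaped to (batch, 1, 1, 1) and then broadcast to the full sample shape, e.g.
# broadcast_to_shape_from_left(jnp.ones((4,)), (4, 64, 64, 3)) returns an array of shape (4, 64, 64, 3).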
def a ( __a , __a=0.9_9_9 , __a=jnp.floataa ) -> jnp.ndarray:
'''simple docstring'''
def alpha_bar(__a ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
UpperCamelCase__ :Tuple = []
for i in range(__a ):
UpperCamelCase__ :List[Any] = i / num_diffusion_timesteps
UpperCamelCase__ :Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__a ) / alpha_bar(__a ) , __a ) )
return jnp.array(__a , dtype=__a )
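# Illustrative usage (not in the original file): betas_for_alpha_bar(1000) returns a length-1000
# float32 array of betas derived from the cosine alpha_bar schedule above; the values grow toward
# the end of the schedule and are capped at max_beta (0.999 by default).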
@flax.struct.dataclass
class lowercase :
"""simple docstring"""
_a = 42
_a = 42
_a = 42
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = scheduler.config
if config.trained_betas is not None:
UpperCamelCase__ :Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
UpperCamelCase__ :int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase__ :Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase__ :Tuple = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
UpperCamelCase__ :List[Any] = 1.0 - betas
UpperCamelCase__ :Optional[Any] = jnp.cumprod(UpperCamelCase_ , axis=0 )
return cls(
alphas=UpperCamelCase_ , betas=UpperCamelCase_ , alphas_cumprod=UpperCamelCase_ , )
def a ( __a , __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :Tuple = state.alphas_cumprod
UpperCamelCase__ :Optional[int] = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase__ :Tuple = sqrt_alpha_prod.flatten()
UpperCamelCase__ :int = broadcast_to_shape_from_left(__a , original_samples.shape )
UpperCamelCase__ :Dict = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase__ :Any = sqrt_one_minus_alpha_prod.flatten()
UpperCamelCase__ :Union[str, Any] = broadcast_to_shape_from_left(__a , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def a ( __a , __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Tuple = get_sqrt_alpha_prod(__a , __a , __a , __a )
UpperCamelCase__ :List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def a ( __a , __a , __a , __a ) -> str:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_sqrt_alpha_prod(__a , __a , __a , __a )
UpperCamelCase__ :Optional[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity | 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__snake_case = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
__snake_case = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
__snake_case = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 1 , UpperCamelCase_ = 4 , ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ )
} | 97 |
'''simple docstring'''
def a ( __a , __a ) -> int:
'''simple docstring'''
if len(__a ) != len(__a ):
raise ValueError('''String lengths must match!''' )
UpperCamelCase__ :Union[str, Any] = 0
for chara, charb in zip(__a , __a ):
if chara != charb:
count += 1
return count
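# Illustrative example (not part of the original file): for the equal-length strings "karolin" and
# "kathrin" the loop above finds three differing positions, so the function returns 3.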
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__snake_case = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ = 101 ):
'''simple docstring'''
UpperCamelCase__ :int = length
def __len__( self ):
'''simple docstring'''
return self.length
def __getitem__( self , UpperCamelCase_ ):
'''simple docstring'''
return i
class lowercase :
"""simple docstring"""
def __call__( self , UpperCamelCase_ ):
'''simple docstring'''
return {"input_ids": torch.tensor(UpperCamelCase_ ), "labels": torch.tensor(UpperCamelCase_ )}
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase__ :List[Any] = nn.Linear(120 , 80 )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class lowercase ( A__ ):
"""simple docstring"""
@require_torch_neuroncore
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = F'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
UpperCamelCase__ :Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ :Tuple = F'''--output_dir {output_dir}'''.split()
UpperCamelCase__ :Tuple = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowercase ( A__ ):
"""simple docstring"""
@require_torch_multi_gpu
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = F'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
UpperCamelCase__ :int = self.get_auto_remove_tmp_dir()
UpperCamelCase__ :List[Any] = F'''--output_dir {output_dir}'''.split()
UpperCamelCase__ :str = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__snake_case = HfArgumentParser((TrainingArguments,))
__snake_case = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__snake_case = DummyDataset(dataset_length)
def a ( __a ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :List[Any] = list(range(len(__a ) ) )
UpperCamelCase__ :Optional[int] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
__snake_case = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__snake_case = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__snake_case = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__snake_case = 2
__snake_case = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__snake_case = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__snake_case = None | 97 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
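# Illustrative example (not part of the original script): catalan_numbers(5) returns
# [1, 1, 2, 5, 14, 42], i.e. C(0) through C(5).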
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( A__ ):
"""simple docstring"""
_a = (DDPMScheduler,)
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCamelCase_ )
return config
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.scheduler_classes[0]
UpperCamelCase__ :Tuple = self.get_scheduler_config()
UpperCamelCase__ :Union[str, Any] = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.scheduler_classes[0]
UpperCamelCase__ :Any = self.get_scheduler_config()
UpperCamelCase__ :int = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :Dict = len(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.dummy_model()
UpperCamelCase__ :int = self.dummy_sample_deter
UpperCamelCase__ :Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
UpperCamelCase__ :Dict = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ :Any = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase__ :Optional[int] = pred_prev_sample
UpperCamelCase__ :Dict = torch.sum(torch.abs(UpperCamelCase_ ) )
UpperCamelCase__ :Optional[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.scheduler_classes[0]
UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCamelCase__ :List[Any] = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = len(UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.dummy_model()
UpperCamelCase__ :Optional[int] = self.dummy_sample_deter
UpperCamelCase__ :Tuple = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
UpperCamelCase__ :Tuple = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ :Optional[int] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase__ :List[str] = pred_prev_sample
UpperCamelCase__ :List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
UpperCamelCase__ :Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.scheduler_classes[0]
UpperCamelCase__ :Optional[Any] = self.get_scheduler_config()
UpperCamelCase__ :List[Any] = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
UpperCamelCase__ :Tuple = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase_ ):
if i == len(UpperCamelCase_ ) - 1:
UpperCamelCase__ :List[str] = -1
else:
UpperCamelCase__ :List[str] = timesteps[i + 1]
UpperCamelCase__ :Optional[int] = scheduler.previous_timestep(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = prev_t.item()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.scheduler_classes[0]
UpperCamelCase__ :List[Any] = self.get_scheduler_config()
UpperCamelCase__ :Union[str, Any] = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.scheduler_classes[0]
UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config()
UpperCamelCase__ :Union[str, Any] = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :Tuple = [100, 87, 50, 1, 0]
UpperCamelCase__ :List[Any] = len(UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.scheduler_classes[0]
UpperCamelCase__ :Any = self.get_scheduler_config()
UpperCamelCase__ :Dict = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ ) | 97 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
return [json.loads(line ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
'''simple docstring'''
def a ( __a , __a , __a , __a ) -> str:
'''simple docstring'''
if height >= 1:
move_tower(height - 1 , __a , __a , __a )
move_disk(__a , __a )
move_tower(height - 1 , __a , __a , __a )
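# Note (illustrative, not in the original file): for a tower of height n the recursion above issues
# 2**n - 1 moves in total, e.g. 3 moves for height 2 and 7 moves for height 3.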
def a ( __a , __a ) -> str:
'''simple docstring'''
print('''moving disk from''' , __a , '''to''' , __a )
def a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = int(input('''Height of hanoi: ''' ).strip() )
move_tower(__a , '''A''' , '''B''' , '''C''' )
if __name__ == "__main__":
main() | 97 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
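# Checks that _convert_nargs_to_dict turns the mock SageMaker training-script arguments into
# typed values (str / bool / int / float) and raises on the malformed argument list.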
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 1 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
__snake_case = 637_8137.0
__snake_case = 635_6752.31_4245
__snake_case = 6378137
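# Ellipsoidal (flattening-corrected) distance between two latitude/longitude points,
# refining the spherical haversine central angle with Lambert's X/Y correction terms.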
def a ( __a , __a , __a , __a ) -> float:
'''simple docstring'''
UpperCamelCase__ :Dict = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCamelCase__ :Optional[Any] = atan((1 - flattening) * tan(radians(__a ) ) )
UpperCamelCase__ :List[str] = atan((1 - flattening) * tan(radians(__a ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCamelCase__ :Dict = haversine_distance(__a , __a , __a , __a ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCamelCase__ :Tuple = (b_lata + b_lata) / 2
UpperCamelCase__ :Union[str, Any] = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
UpperCamelCase__ :Tuple = (sin(__a ) ** 2) * (cos(__a ) ** 2)
UpperCamelCase__ :List[Any] = cos(sigma / 2 ) ** 2
UpperCamelCase__ :Optional[Any] = (sigma - sin(__a )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
UpperCamelCase__ :Union[str, Any] = (cos(__a ) ** 2) * (sin(__a ) ** 2)
UpperCamelCase__ :List[str] = sin(sigma / 2 ) ** 2
UpperCamelCase__ :int = (sigma + sin(__a )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
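# Small torch helpers: freeze a module's parameters, pick a device (MPS if available, else CUDA,
# else CPU, with a warning when MPS is selected), show an image with matplotlib, and format a timestamp.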
def a ( __a ) -> int:
'''simple docstring'''
for param in module.parameters():
UpperCamelCase__ :Dict = False
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase__ :Optional[int] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Dict = plt.imshow(__a )
fig.axes.get_xaxis().set_visible(__a )
fig.axes.get_yaxis().set_visible(__a )
plt.show()
def a ( ) -> str:
'''simple docstring'''
UpperCamelCase__ :int = datetime.now()
UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''' )
return timestamp | 97 | 1 |
'''simple docstring'''
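# Chemistry conversions: molarity to normality via the n-factor, plus rearrangements of the
# ideal gas law PV = nRT (R ≈ 0.0821 L·atm/(mol·K)) for pressure, volume and temperature.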
def a ( __a , __a , __a ) -> float:
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def a ( __a , __a , __a ) -> float:
'''simple docstring'''
return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def a ( __a , __a , __a ) -> float:
'''simple docstring'''
return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def a ( __a , __a , __a ) -> float:
'''simple docstring'''
return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )} | 97 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
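# Transformer building blocks for diffusion models: a basic transformer block (self-attention,
# optional cross-attention, chunked feed-forward), GELU/GEGLU activations, and adaptive
# layer/group norms conditioned on timestep or class embeddings.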
@maybe_allow_in_graph
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=0.0 , UpperCamelCase_ = None , UpperCamelCase_ = "geglu" , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = True , UpperCamelCase_ = "layer_norm" , UpperCamelCase_ = False , ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Dict = only_cross_attention
UpperCamelCase__ :Tuple = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
UpperCamelCase__ :str = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
UpperCamelCase__ :Tuple = AdaLayerNorm(UpperCamelCase_ , UpperCamelCase_ )
elif self.use_ada_layer_norm_zero:
UpperCamelCase__ :Dict = AdaLayerNormZero(UpperCamelCase_ , UpperCamelCase_ )
else:
UpperCamelCase__ :List[str] = nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = Attention(
query_dim=UpperCamelCase_ , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , dropout=UpperCamelCase_ , bias=UpperCamelCase_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
UpperCamelCase__ :Optional[int] = (
AdaLayerNorm(UpperCamelCase_ , UpperCamelCase_ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ )
)
UpperCamelCase__ :List[str] = Attention(
query_dim=UpperCamelCase_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , dropout=UpperCamelCase_ , bias=UpperCamelCase_ , upcast_attention=UpperCamelCase_ , ) # is self-attn if encoder_hidden_states is none
else:
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :int = None
# 3. Feed-forward
UpperCamelCase__ :List[Any] = nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ )
UpperCamelCase__ :str = FeedForward(UpperCamelCase_ , dropout=UpperCamelCase_ , activation_fn=UpperCamelCase_ , final_dropout=UpperCamelCase_ )
# let chunk size default to None
UpperCamelCase__ :List[Any] = None
UpperCamelCase__ :Any = 0
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = chunk_size
UpperCamelCase__ :str = dim
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
UpperCamelCase__ :int = self.norma(UpperCamelCase_ , UpperCamelCase_ )
elif self.use_ada_layer_norm_zero:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.norma(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hidden_dtype=hidden_states.dtype )
else:
UpperCamelCase__ :Any = self.norma(UpperCamelCase_ )
UpperCamelCase__ :List[str] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
UpperCamelCase__ :int = self.attna(
UpperCamelCase_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
if self.use_ada_layer_norm_zero:
UpperCamelCase__ :int = gate_msa.unsqueeze(1 ) * attn_output
UpperCamelCase__ :Optional[int] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
UpperCamelCase__ :str = (
self.norma(UpperCamelCase_ , UpperCamelCase_ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase_ )
)
UpperCamelCase__ :Any = self.attna(
UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Tuple = attn_output + hidden_states
# 3. Feed-forward
UpperCamelCase__ :List[Any] = self.norma(UpperCamelCase_ )
if self.use_ada_layer_norm_zero:
UpperCamelCase__ :str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
UpperCamelCase__ :List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
UpperCamelCase__ :Tuple = torch.cat(
[self.ff(UpperCamelCase_ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
UpperCamelCase__ :Optional[Any] = self.ff(UpperCamelCase_ )
if self.use_ada_layer_norm_zero:
UpperCamelCase__ :Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
UpperCamelCase__ :Optional[Any] = ff_output + hidden_states
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 4 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = "geglu" , UpperCamelCase_ = False , ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Tuple = int(dim * mult )
UpperCamelCase__ :List[str] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
UpperCamelCase__ :Optional[int] = GELU(UpperCamelCase_ , UpperCamelCase_ )
if activation_fn == "gelu-approximate":
UpperCamelCase__ :Any = GELU(UpperCamelCase_ , UpperCamelCase_ , approximate='''tanh''' )
elif activation_fn == "geglu":
UpperCamelCase__ :int = GEGLU(UpperCamelCase_ , UpperCamelCase_ )
elif activation_fn == "geglu-approximate":
UpperCamelCase__ :Tuple = ApproximateGELU(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :str = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase_ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase_ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for module in self.net:
UpperCamelCase__ :Optional[int] = module(UpperCamelCase_ )
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = "none" ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :int = approximate
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(UpperCamelCase_ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.proj(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.gelu(UpperCamelCase_ )
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Optional[Any] = nn.Linear(UpperCamelCase_ , dim_out * 2 )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(UpperCamelCase_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Any = self.proj(UpperCamelCase_ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase_ )
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :List[str] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.proj(UpperCamelCase_ )
return x * torch.sigmoid(1.702 * x )
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Union[str, Any] = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[str] = nn.SiLU()
UpperCamelCase__ :Union[str, Any] = nn.Linear(UpperCamelCase_ , embedding_dim * 2 )
UpperCamelCase__ :Optional[Any] = nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.linear(self.silu(self.emb(UpperCamelCase_ ) ) )
UpperCamelCase__ , UpperCamelCase__ :List[str] = torch.chunk(UpperCamelCase_ , 2 )
UpperCamelCase__ :Union[str, Any] = self.norm(UpperCamelCase_ ) * (1 + scale) + shift
return x
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :List[str] = CombinedTimestepLabelEmbeddings(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :int = nn.SiLU()
UpperCamelCase__ :Tuple = nn.Linear(UpperCamelCase_ , 6 * embedding_dim , bias=UpperCamelCase_ )
UpperCamelCase__ :str = nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ , eps=1e-6 )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :int = self.linear(self.silu(self.emb(UpperCamelCase_ , UpperCamelCase_ , hidden_dtype=UpperCamelCase_ ) ) )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = emb.chunk(6 , dim=1 )
UpperCamelCase__ :List[str] = self.norm(UpperCamelCase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 1e-5 ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :List[Any] = num_groups
UpperCamelCase__ :Optional[Any] = eps
if act_fn is None:
UpperCamelCase__ :Tuple = None
else:
UpperCamelCase__ :List[str] = get_activation(UpperCamelCase_ )
UpperCamelCase__ :Dict = nn.Linear(UpperCamelCase_ , out_dim * 2 )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if self.act:
UpperCamelCase__ :int = self.act(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = self.linear(UpperCamelCase_ )
UpperCamelCase__ :str = emb[:, :, None, None]
UpperCamelCase__ , UpperCamelCase__ :str = emb.chunk(2 , dim=1 )
UpperCamelCase__ :str = F.group_norm(UpperCamelCase_ , self.num_groups , eps=self.eps )
UpperCamelCase__ :Optional[Any] = x * (1 + scale) + shift
return x | 97 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
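# "Fast" BlenderbotSmall tokenizer backed by a byte-level BPE model; special-token handling
# wraps sequences in BOS/EOS, and token type ids are always zero.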
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 97 | 1 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
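# Converts a T5X (Flax) checkpoint into a PyTorch state dict: attention kernels are transposed
# and reshaped per layer, MLP weights cover both the gated (wi_0/wi_1) and single-wi layouts, and
# relative position biases are copied per layer when scalable attention is used.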
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def a ( __a , __a , __a , __a="attention" ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
UpperCamelCase__ :str = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
UpperCamelCase__ :int = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
UpperCamelCase__ :Any = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
UpperCamelCase__ :List[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
UpperCamelCase__ :Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
UpperCamelCase__ :Tuple = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
UpperCamelCase__ :Tuple = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def a ( __a , __a , __a , __a=False ) -> List[str]:
'''simple docstring'''
if split_mlp_wi:
UpperCamelCase__ :Any = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
UpperCamelCase__ :Any = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
UpperCamelCase__ :List[Any] = (wi_a, wi_a)
else:
UpperCamelCase__ :Optional[Any] = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
UpperCamelCase__ :Optional[int] = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def a ( __a , __a , __a , __a ) -> Union[str, Any]:
'''simple docstring'''
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def a ( __a , *, __a , __a , __a = False ) -> str:
'''simple docstring'''
UpperCamelCase__ :str = traverse_util.flatten_dict(variables['''target'''] )
UpperCamelCase__ :Dict = {'''/'''.join(__a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCamelCase__ :str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , __a )
UpperCamelCase__ :Any = collections.OrderedDict()
# Shared embeddings.
UpperCamelCase__ :List[str] = old['''token_embedder/embedding''']
# Encoder.
for i in range(__a ):
# Block i, layer 0 (Self Attention).
UpperCamelCase__ :Any = tax_layer_norm_lookup(__a , __a , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = tax_attention_lookup(__a , __a , '''encoder''' , '''attention''' )
UpperCamelCase__ :str = layer_norm
UpperCamelCase__ :int = k.T
UpperCamelCase__ :Union[str, Any] = o.T
UpperCamelCase__ :Optional[int] = q.T
UpperCamelCase__ :Tuple = v.T
# Block i, layer 1 (MLP).
UpperCamelCase__ :Optional[Any] = tax_layer_norm_lookup(__a , __a , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ :str = tax_mlp_lookup(__a , __a , '''encoder''' , __a )
UpperCamelCase__ :Any = layer_norm
if split_mlp_wi:
UpperCamelCase__ :List[Any] = wi[0].T
UpperCamelCase__ :Tuple = wi[1].T
else:
UpperCamelCase__ :Union[str, Any] = wi.T
UpperCamelCase__ :List[str] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase__ :List[str] = tax_relpos_bias_lookup(
__a , __a , '''encoder''' ).T
UpperCamelCase__ :str = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
UpperCamelCase__ :Optional[int] = tax_relpos_bias_lookup(
__a , 0 , '''encoder''' ).T
UpperCamelCase__ :int = tax_relpos_bias_lookup(
__a , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(__a ):
# Block i, layer 0 (Self Attention).
UpperCamelCase__ :Dict = tax_layer_norm_lookup(__a , __a , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[int] = tax_attention_lookup(__a , __a , '''decoder''' , '''self_attention''' )
UpperCamelCase__ :List[str] = layer_norm
UpperCamelCase__ :Optional[int] = k.T
UpperCamelCase__ :Union[str, Any] = o.T
UpperCamelCase__ :Optional[int] = q.T
UpperCamelCase__ :str = v.T
# Block i, layer 1 (Cross Attention).
UpperCamelCase__ :Union[str, Any] = tax_layer_norm_lookup(__a , __a , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = tax_attention_lookup(__a , __a , '''decoder''' , '''encoder_decoder_attention''' )
UpperCamelCase__ :Tuple = layer_norm
UpperCamelCase__ :List[str] = k.T
UpperCamelCase__ :List[str] = o.T
UpperCamelCase__ :str = q.T
UpperCamelCase__ :str = v.T
# Block i, layer 2 (MLP).
UpperCamelCase__ :Tuple = tax_layer_norm_lookup(__a , __a , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = tax_mlp_lookup(__a , __a , '''decoder''' , __a )
UpperCamelCase__ :Dict = layer_norm
if split_mlp_wi:
UpperCamelCase__ :List[Any] = wi[0].T
UpperCamelCase__ :Tuple = wi[1].T
else:
UpperCamelCase__ :Dict = wi.T
UpperCamelCase__ :Optional[int] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase__ :Any = tax_relpos_bias_lookup(__a , __a , '''decoder''' ).T
UpperCamelCase__ :Dict = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCamelCase__ :Tuple = old['''decoder/logits_dense/kernel'''].T
return new
def a ( __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCamelCase__ :List[str] = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCamelCase__ :Tuple = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCamelCase__ :Tuple = state_dict['''shared.weight''']
return state_dict
def a ( __a , __a , __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :int = checkpoints.load_tax_checkpoint(__a )
UpperCamelCase__ :List[str] = convert_tax_to_pytorch(
__a , num_layers=config.num_layers , is_encoder_only=__a , scalable_attention=__a )
UpperCamelCase__ :List[str] = make_state_dict(__a , __a )
model.load_state_dict(__a , strict=__a )
def a ( __a , __a , __a , __a = False , __a = False , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :int = MTaConfig.from_json_file(__a )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCamelCase__ :Dict = UMTaEncoderModel(__a )
else:
UpperCamelCase__ :Optional[int] = UMTaForConditionalGeneration(__a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__a , __a , __a , __a , __a )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__a )
# Verify that we can load the checkpoint.
model.from_pretrained(__a )
print('''Done''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
__snake_case = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
) | 97 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
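# Tests for StableDiffusionUpscalePipeline: fast CPU tests assemble tiny UNet/VAE/CLIP components
# and compare output slices, while the slow GPU tests check reference images, fp16 and memory use.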
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = 1
UpperCamelCase__ :Tuple = 3
UpperCamelCase__ :Union[str, Any] = (32, 32)
UpperCamelCase__ :Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :Dict = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCamelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :str = self.dummy_cond_unet_upscale
UpperCamelCase__ :Tuple = DDPMScheduler()
UpperCamelCase__ :List[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
UpperCamelCase__ :Union[str, Any] = self.dummy_vae
UpperCamelCase__ :Any = self.dummy_text_encoder
UpperCamelCase__ :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ :Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ :Optional[int] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCamelCase__ :Dict = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
UpperCamelCase__ :str = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ :Union[str, Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
UpperCamelCase__ :Optional[int] = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase__ :int = output.images
UpperCamelCase__ :Union[str, Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
UpperCamelCase__ :str = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase_ , )[0]
UpperCamelCase__ :List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ :Dict = image_from_tuple[0, -3:, -3:, -1]
UpperCamelCase__ :Tuple = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCamelCase__ :Tuple = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :List[str] = self.dummy_cond_unet_upscale
UpperCamelCase__ :Tuple = DDPMScheduler()
UpperCamelCase__ :Optional[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
UpperCamelCase__ :Dict = self.dummy_vae
UpperCamelCase__ :Any = self.dummy_text_encoder
UpperCamelCase__ :Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ :List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ :Optional[Any] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCamelCase__ :Dict = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
UpperCamelCase__ :int = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :int = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ :int = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase__ :List[str] = output.images
assert image.shape[0] == 2
UpperCamelCase__ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
UpperCamelCase__ :Union[str, Any] = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase__ :List[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.dummy_cond_unet_upscale
UpperCamelCase__ :List[Any] = DDPMScheduler()
UpperCamelCase__ :Optional[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
UpperCamelCase__ :Optional[int] = self.dummy_vae
UpperCamelCase__ :Union[str, Any] = self.dummy_text_encoder
UpperCamelCase__ :List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ :Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ :Optional[Any] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCamelCase__ :Optional[int] = unet.half()
UpperCamelCase__ :List[str] = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ :Any = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
UpperCamelCase__ :Any = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Any = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Dict = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' , ).images
UpperCamelCase__ :Tuple = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
UpperCamelCase__ :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
UpperCamelCase__ :List[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
UpperCamelCase__ :Any = StableDiffusionUpscalePipeline.from_pretrained(UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :Optional[Any] = '''a cat sitting on a park bench'''
UpperCamelCase__ :Optional[int] = torch.manual_seed(0 )
UpperCamelCase__ :List[str] = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''np''' , )
UpperCamelCase__ :Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
UpperCamelCase__ :Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
UpperCamelCase__ :Tuple = '''stabilityai/stable-diffusion-x4-upscaler'''
UpperCamelCase__ :int = StableDiffusionUpscalePipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :int = '''a cat sitting on a park bench'''
UpperCamelCase__ :int = torch.manual_seed(0 )
UpperCamelCase__ :int = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''np''' , )
UpperCamelCase__ :Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase__ :Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
UpperCamelCase__ :Union[str, Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
UpperCamelCase__ :List[Any] = StableDiffusionUpscalePipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase__ :List[Any] = '''a cat sitting on a park bench'''
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Union[str, Any] = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , output_type='''np''' , )
UpperCamelCase__ :Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9 | 97 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
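# Builds a datasets.Dataset from a Python generator: streaming mode returns an iterable dataset,
# otherwise download_and_prepare() runs and the "train" split is returned.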
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset | 97 | 1 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
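# Edge-preserving bilateral filter: each output pixel is a Gaussian-weighted average of its
# neighbourhood, combining a spatial kernel (distance from the centre) with an intensity kernel
# (difference from the centre pixel's value).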
def a ( __a , __a ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = math.sqrt(__a )
UpperCamelCase__ :Tuple = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def a ( __a , __a , __a , __a ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def a ( __a , __a ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = np.zeros((kernel_size, kernel_size) )
for i in range(0 , __a ):
for j in range(0 , __a ):
UpperCamelCase__ :Any = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__a , __a )
def a ( __a , __a , __a , __a , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = np.zeros(img.shape )
UpperCamelCase__ :str = get_gauss_kernel(__a , __a )
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
UpperCamelCase__ :str = get_slice(__a , __a , __a , __a )
UpperCamelCase__ :Any = img_s - img_s[kernel_size // 2, kernel_size // 2]
UpperCamelCase__ :Optional[Any] = vec_gaussian(__a , __a )
UpperCamelCase__ :int = np.multiply(__a , __a )
UpperCamelCase__ :Optional[Any] = np.multiply(__a , __a )
UpperCamelCase__ :Dict = np.sum(__a ) / np.sum(__a )
UpperCamelCase__ :List[str] = val
return imga
def a ( __a ) -> tuple:
'''simple docstring'''
UpperCamelCase__ :List[str] = args[1] if args[1:] else '''../image_data/lena.jpg'''
UpperCamelCase__ :Union[str, Any] = float(args[2] ) if args[2:] else 1.0
UpperCamelCase__ :Optional[int] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
UpperCamelCase__ :Tuple = int(args[4] )
UpperCamelCase__ :Optional[Any] = kernel_size + abs(kernel_size % 2 - 1 )
else:
UpperCamelCase__ :Optional[Any] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
__snake_case , __snake_case , __snake_case , __snake_case = parse_args(sys.argv)
__snake_case = cva.imread(filename, 0)
cva.imshow('''input image''', img)
__snake_case = img / 255
__snake_case = out.astype('''float32''')
__snake_case = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
__snake_case = out * 255
__snake_case = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows() | 97 |
'''simple docstring'''
__snake_case = 65521
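# Adler-32 checksum: running sums a (over byte values) and b (over successive values of a),
# both modulo 65521, packed into a single integer as (b << 16) | a.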
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
return (b << 16) | a | 97 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__snake_case = logging.get_logger(__name__)
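# Safety checker for generated images: a CLIP vision tower feeds two linear heads scoring NSFW
# and watermark likelihood; flagged images are replaced with all-zero (black) arrays and a
# warning is logged.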
class lowercase ( A__ ):
"""simple docstring"""
_a = CLIPConfig
_a = ['CLIPEncoderLayer']
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
super().__init__(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = CLIPVisionModelWithProjection(config.vision_config )
UpperCamelCase__ :Union[str, Any] = nn.Linear(config.vision_config.projection_dim , 1 )
UpperCamelCase__ :int = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=0.5 , UpperCamelCase_=0.5 ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.vision_model(UpperCamelCase_ )[0]
UpperCamelCase__ :List[Any] = self.p_head(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = nsfw_detected.flatten()
UpperCamelCase__ :str = nsfw_detected > p_threshold
UpperCamelCase__ :List[Any] = nsfw_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(UpperCamelCase_ ):
if nsfw_detected_:
UpperCamelCase__ :Tuple = np.zeros(images[idx].shape )
UpperCamelCase__ :Optional[Any] = self.w_head(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = watermark_detected.flatten()
UpperCamelCase__ :Dict = watermark_detected > w_threshold
UpperCamelCase__ :int = watermark_detected.tolist()
if any(UpperCamelCase_ ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(UpperCamelCase_ ):
if watermark_detected_:
UpperCamelCase__ :Dict = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected | 97 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
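# The processor scales the shorter image side to size['shortest_edge'] and the longer side proportionally;
# the branches below mirror that rule to compute the expected output size for the test.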
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
UpperCamelCase__ :int = {
'''input_ids''': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
UpperCamelCase__ :Any = model(UpperCamelCase_ )['''last_hidden_state''']
UpperCamelCase__ :Any = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice.
UpperCamelCase__ :Any = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) ) | 97 |
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
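# Worked example: with 3 persons final_mask == (1 << 3) - 1 == 0b111, and a mask of 0b101 means
# persons 0 and 2 already hold a task while person 1 is still free.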
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if we have run out of tasks but not every person has one, this arrangement contributes 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't assign this task to anyone in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in UpperCamelCase_[i]:
self.task[j].append(i )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
'''simple docstring'''
import math
def a ( __a , __a = 0 , __a = 0 ) -> list:
'''simple docstring'''
UpperCamelCase__ :Dict = end or len(__a )
for i in range(__a , __a ):
UpperCamelCase__ :Tuple = i
UpperCamelCase__ :Union[str, Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
UpperCamelCase__ :Any = array[temp_index - 1]
temp_index -= 1
UpperCamelCase__ :Union[str, Any] = temp_index_value
return array
def a ( __a , __a , __a ) -> None: # Max Heap
'''simple docstring'''
UpperCamelCase__ :Dict = index
UpperCamelCase__ :Optional[Any] = 2 * index + 1 # Left Node
UpperCamelCase__ :Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
UpperCamelCase__ :Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
UpperCamelCase__ :Dict = right_index
if largest != index:
UpperCamelCase__ , UpperCamelCase__ :Tuple = array[largest], array[index]
heapify(__a , __a , __a )
def a ( __a ) -> list:
'''simple docstring'''
UpperCamelCase__ :List[str] = len(__a )
for i in range(n // 2 , -1 , -1 ):
heapify(__a , __a , __a )
for i in range(n - 1 , 0 , -1 ):
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = array[0], array[i]
heapify(__a , 0 , __a )
return array
def a ( __a , __a , __a , __a ) -> int:
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def a ( __a , __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :str = low
UpperCamelCase__ :Dict = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
UpperCamelCase__ , UpperCamelCase__ :int = array[j], array[i]
i += 1
def a ( __a ) -> list:
'''simple docstring'''
if len(__a ) == 0:
return array
UpperCamelCase__ :Tuple = 2 * math.ceil(math.loga(len(__a ) ) )
UpperCamelCase__ :Optional[Any] = 16
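# Introsort strategy: quicksort drives the partitioning, heapsort takes over once the recursion depth
# budget computed above is exhausted, and slices smaller than the size threshold (16) are finished
# with insertion sort.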
return intro_sort(__a , 0 , len(__a ) , __a , __a )
def a ( __a , __a , __a , __a , __a ) -> list:
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(__a )
max_depth -= 1
UpperCamelCase__ :Any = median_of_a(__a , __a , start + ((end - start) // 2) + 1 , end - 1 )
UpperCamelCase__ :int = partition(__a , __a , __a , __a )
intro_sort(__a , __a , __a , __a , __a )
UpperCamelCase__ :Union[str, Any] = p
return insertion_sort(__a , __a , __a )
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input('''Enter numbers separated by a comma : ''').strip()
__snake_case = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted)) | 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''Download a user's most recent tweets page by page via the Twitter API and write their id, creation time and text to new_<screen_name>_tweets.csv.'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=A__ ):
"""simple docstring"""
_a = ['note_seq']
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
requires_backends(self , ['''note_seq'''] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
requires_backends(cls , ['''note_seq'''] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
requires_backends(cls , ['''note_seq'''] ) | 97 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
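# The original checkpoint stores query, key and value as a single fused in_proj tensor. With a hidden
# size of 256, rows 0-255 are the query projection, rows 256-511 the key, and the last 256 rows the
# value, which is exactly how the slices below split it.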
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' ,
mask_loss_coefficient=1 ,
dice_loss_coefficient=1 ,
ce_loss_coefficient=1 ,
bbox_loss_coefficient=5 ,
giou_loss_coefficient=2 ,
eos_coefficient=0.4 ,
class_cost=1 ,
bbox_cost=5 ,
giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__snake_case = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''OwlViTFeatureExtractor''']
__snake_case = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
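# Enumerate fractions x = x_num/x_den and y = y_num/y_den with 0 < x, y < 1 and denominators up to
# `order`; the four blocks below solve x**n + y**n == z**n for n in (1, 2, -1, -2) and, whenever z is a
# valid fraction in the same range, record the reduced sum x + y + z.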
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def a ( __a ) -> Any:
'''simple docstring'''
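# Computes the entropy of softmax(x) row-wise: with A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i),
# the entropy -sum_i p_i * log(p_i) with p_i = exp(x_i) / A simplifies to log(A) - B / A.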
UpperCamelCase__ :str = torch.exp(__a )
UpperCamelCase__ :Tuple = torch.sum(__a , dim=1 ) # sum of exp(x_i)
UpperCamelCase__ :Optional[Any] = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__a ) - B / A
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Dict = config.output_attentions
UpperCamelCase__ :Union[str, Any] = config.output_hidden_states
UpperCamelCase__ :List[str] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
UpperCamelCase__ :int = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
UpperCamelCase__ :str = [-1 for _ in range(config.num_hidden_layers )]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
UpperCamelCase__ :Union[str, Any] = x
else:
UpperCamelCase__ :Optional[Any] = x
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ):
'''simple docstring'''
UpperCamelCase__ :str = ()
UpperCamelCase__ :Optional[int] = ()
UpperCamelCase__ :Optional[int] = ()
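# At inference time every layer's highway head is evaluated as the loop runs; when the entropy of its
# logits drops below that layer's early-exit threshold, a HighwayException carries the intermediate
# prediction out of the forward pass.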
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
UpperCamelCase__ :Optional[int] = all_hidden_states + (hidden_states,)
UpperCamelCase__ :Dict = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :str = layer_outputs[0]
if self.output_attentions:
UpperCamelCase__ :int = all_attentions + (layer_outputs[1],)
UpperCamelCase__ :List[Any] = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase__ :Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase__ :int = current_outputs + (all_attentions,)
UpperCamelCase__ :Optional[int] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
UpperCamelCase__ :List[str] = highway_exit[0]
UpperCamelCase__ :List[Any] = entropy(UpperCamelCase_ )
UpperCamelCase__ :Tuple = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
UpperCamelCase__ :int = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
UpperCamelCase__ :str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
UpperCamelCase__ :int = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
UpperCamelCase__ :int = all_hidden_states + (hidden_states,)
UpperCamelCase__ :List[Any] = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase__ :Union[str, Any] = outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase__ :int = outputs + (all_attentions,)
UpperCamelCase__ :Optional[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' , A__ , )
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
super().__init__(UpperCamelCase_ )
UpperCamelCase__ :Tuple = config
UpperCamelCase__ :Dict = BertEmbeddings(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = DeeBertEncoder(UpperCamelCase_ )
UpperCamelCase__ :str = BertPooler(UpperCamelCase_ )
self.init_weights()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.embeddings.word_embeddings
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = value
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
UpperCamelCase__ :List[str] = input_ids.size()
elif inputs_embeds is not None:
UpperCamelCase__ :int = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
UpperCamelCase__ :Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCamelCase__ :str = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
UpperCamelCase__ :str = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
UpperCamelCase__ :Optional[int] = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCamelCase__ :torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
UpperCamelCase__ :Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
UpperCamelCase__ :int = encoder_attention_mask[:, None, None, :]
UpperCamelCase__ :int = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
UpperCamelCase__ :str = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCamelCase__ :Dict = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
UpperCamelCase__ :List[str] = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
UpperCamelCase__ :Any = encoder_outputs[0]
UpperCamelCase__ :Tuple = self.pooler(UpperCamelCase_ )
UpperCamelCase__ :List[str] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = message
UpperCamelCase__ :Union[str, Any] = exit_layer # start from 1!
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Dict = BertPooler(UpperCamelCase_ )
UpperCamelCase__ :Tuple = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase__ :Optional[int] = nn.Linear(config.hidden_size , config.num_labels )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = encoder_outputs[0]
UpperCamelCase__ :Optional[Any] = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
UpperCamelCase__ :Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
UpperCamelCase__ :Any = bmodel_output[1]
UpperCamelCase__ :Union[str, Any] = self.dropout(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , A__ , )
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
super().__init__(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = config.num_labels
UpperCamelCase__ :Tuple = config.num_hidden_layers
UpperCamelCase__ :Any = DeeBertModel(UpperCamelCase_ )
UpperCamelCase__ :Any = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase__ :Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=-1 , UpperCamelCase_=False , ):
'''simple docstring'''
UpperCamelCase__ :int = self.num_layers
try:
UpperCamelCase__ :Optional[int] = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
UpperCamelCase__ :List[str] = outputs[1]
UpperCamelCase__ :int = self.dropout(UpperCamelCase_ )
UpperCamelCase__ :Any = self.classifier(UpperCamelCase_ )
UpperCamelCase__ :List[str] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCamelCase__ :Any = e.message
UpperCamelCase__ :Union[str, Any] = e.exit_layer
UpperCamelCase__ :str = outputs[0]
if not self.training:
UpperCamelCase__ :Optional[Any] = entropy(UpperCamelCase_ )
UpperCamelCase__ :Dict = []
UpperCamelCase__ :int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCamelCase__ :Union[str, Any] = MSELoss()
UpperCamelCase__ :Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase__ :Dict = CrossEntropyLoss()
UpperCamelCase__ :Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCamelCase__ :Optional[Any] = []
for highway_exit in outputs[-1]:
UpperCamelCase__ :Any = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCamelCase__ :int = MSELoss()
UpperCamelCase__ :List[str] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase__ :Union[str, Any] = CrossEntropyLoss()
UpperCamelCase__ :Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
UpperCamelCase__ :Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCamelCase__ :Union[str, Any] = (loss,) + outputs
if not self.training:
UpperCamelCase__ :Any = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCamelCase__ :Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits) | 97 |
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''Return the product d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000, where d_n is the n-th digit of Champernowne's constant 0.123456789101112...'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
UpperCamelCase__ :Union[str, Any] = ''''''.join(__a )
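# One million integers are concatenated, so the joined string has comfortably more than 10**6
# characters and every index used below is in range.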
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'deberta-v2'
def __init__( self , UpperCamelCase_=128100 , UpperCamelCase_=1536 , UpperCamelCase_=24 , UpperCamelCase_=24 , UpperCamelCase_=6144 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=0 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-7 , UpperCamelCase_=False , UpperCamelCase_=-1 , UpperCamelCase_=0 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=0 , UpperCamelCase_="gelu" , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Union[str, Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Optional[Any] = intermediate_size
UpperCamelCase__ :int = hidden_act
UpperCamelCase__ :Dict = hidden_dropout_prob
UpperCamelCase__ :str = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :Dict = initializer_range
UpperCamelCase__ :Any = relative_attention
UpperCamelCase__ :int = max_relative_positions
UpperCamelCase__ :List[Any] = pad_token_id
UpperCamelCase__ :List[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase_ ) == str:
UpperCamelCase__ :Optional[Any] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
UpperCamelCase__ :List[str] = pos_att_type
UpperCamelCase__ :Dict = vocab_size
UpperCamelCase__ :Optional[Any] = layer_norm_eps
UpperCamelCase__ :Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = pooler_dropout
UpperCamelCase__ :str = pooler_hidden_act
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return 12
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = 3 , UpperCamelCase_ = 40 , UpperCamelCase_ = 40 , UpperCamelCase_ = None , ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase_ , framework=UpperCamelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs | 97 |
'''simple docstring'''
from PIL import Image
def a ( __a , __b ) -> Image:
'''Return a copy of the PIL image with every channel value shifted by the given brightness level.'''
def brightness(__a ) -> float:
return 128 + __b + (__a - 128)
if not -2_5_5.0 <= __b <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return __a.point(brightness )
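# Note: 128 + level + (value - 128) simplifies to value + level for every channel; the offset form
# above just keeps the 128 midpoint explicit.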
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = LongformerTokenizer
_a = True
_a = LongformerTokenizerFast
_a = True
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ :Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCamelCase__ :Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase__ :Any = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = '''lower newer'''
UpperCamelCase__ :List[str] = '''lower newer'''
return input_text, output_text
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ :List[str] = '''lower newer'''
UpperCamelCase__ :Tuple = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase__ :str = tokenizer.tokenize(UpperCamelCase_ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :int = tokens + [tokenizer.unk_token]
UpperCamelCase__ :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCamelCase_ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCamelCase_ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
UpperCamelCase__ :Any = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
UpperCamelCase__ :Tuple = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ :List[str] = '''Encode this sequence.'''
UpperCamelCase__ :Any = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
UpperCamelCase__ :Optional[int] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
UpperCamelCase__ :List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
UpperCamelCase__ :Any = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing spaces after special tokens
UpperCamelCase__ :Any = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )} ) # mask token has a left space
UpperCamelCase__ :Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
UpperCamelCase__ :int = '''Encode <mask> sequence'''
UpperCamelCase__ :Tuple = '''Encode <mask>sequence'''
UpperCamelCase__ :List[Any] = tokenizer.encode(UpperCamelCase_ )
UpperCamelCase__ :List[str] = encoded.index(UpperCamelCase_ )
UpperCamelCase__ :int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer.encode(UpperCamelCase_ )
UpperCamelCase__ :Dict = encoded.index(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :List[Any] = '''A, <mask> AllenNLP sentence.'''
UpperCamelCase__ :Union[str, Any] = tokenizer_r.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
UpperCamelCase__ :Tuple = tokenizer_p.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
UpperCamelCase__ :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
UpperCamelCase__ :Any = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCamelCase__ :Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCamelCase__ :List[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCamelCase_ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCamelCase_ )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ :Tuple = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ :str = F'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase__ :str = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :Optional[int] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :Optional[int] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :int = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :str = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCamelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , ) | 97 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
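# Issues carrying any of these labels are exempt from auto-closing and stale comments.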
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
        UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : __a.created_at , reverse=True )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def a ( __a , __a=1 ) -> int:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def a ( __a , __a=0 ) -> str:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
for old_item in old_list:
UpperCamelCase__ :Union[str, Any] = old_item.replace('''in_layers.0''' , '''norm1''' )
UpperCamelCase__ :List[str] = new_item.replace('''in_layers.2''' , '''conv1''' )
UpperCamelCase__ :Dict = new_item.replace('''out_layers.0''' , '''norm2''' )
UpperCamelCase__ :Dict = new_item.replace('''out_layers.3''' , '''conv2''' )
UpperCamelCase__ :Any = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
UpperCamelCase__ :Union[str, Any] = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
UpperCamelCase__ :List[str] = shave_segments(__a , n_shave_prefix_segments=__a )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def a ( __a , __a=0 ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
for old_item in old_list:
UpperCamelCase__ :Tuple = old_item
UpperCamelCase__ :Any = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
UpperCamelCase__ :List[Any] = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
UpperCamelCase__ :Union[str, Any] = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
UpperCamelCase__ :Tuple = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
UpperCamelCase__ :Optional[int] = shave_segments(__a , n_shave_prefix_segments=__a )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def a ( __a , __a , __a , __a=None , __a=None , __a=None ) -> str:
'''simple docstring'''
assert isinstance(__a , __a ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCamelCase__ :Any = old_checkpoint[path]
UpperCamelCase__ :List[str] = old_tensor.shape[0] // 3
UpperCamelCase__ :Optional[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCamelCase__ :int = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCamelCase__ :Dict = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = old_tensor.split(channels // num_heads , dim=1 )
UpperCamelCase__ :Union[str, Any] = query.reshape(__a )
UpperCamelCase__ :List[Any] = key.reshape(__a )
UpperCamelCase__ :Union[str, Any] = value.reshape(__a )
for path in paths:
UpperCamelCase__ :Tuple = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCamelCase__ :Dict = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
UpperCamelCase__ :List[str] = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
UpperCamelCase__ :str = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCamelCase__ :Tuple = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCamelCase__ :List[str] = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCamelCase__ :List[str] = old_checkpoint[path['''old''']]
def a ( __a , __a ) -> int:
'''simple docstring'''
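    # Rename the original LDM UNet state-dict keys to the diffusers UNet2DModel layout, block by block.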
UpperCamelCase__ :int = {}
UpperCamelCase__ :Union[str, Any] = checkpoint['''time_embed.0.weight''']
UpperCamelCase__ :Tuple = checkpoint['''time_embed.0.bias''']
UpperCamelCase__ :Union[str, Any] = checkpoint['''time_embed.2.weight''']
UpperCamelCase__ :Union[str, Any] = checkpoint['''time_embed.2.bias''']
UpperCamelCase__ :List[str] = checkpoint['''input_blocks.0.0.weight''']
UpperCamelCase__ :Tuple = checkpoint['''input_blocks.0.0.bias''']
UpperCamelCase__ :Any = checkpoint['''out.0.weight''']
UpperCamelCase__ :Dict = checkpoint['''out.0.bias''']
UpperCamelCase__ :str = checkpoint['''out.2.weight''']
UpperCamelCase__ :List[Any] = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCamelCase__ :Optional[Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCamelCase__ :Optional[int] = {
layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key]
for layer_id in range(__a )
}
# Retrieves the keys for the middle blocks only
UpperCamelCase__ :int = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCamelCase__ :int = {
layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key]
for layer_id in range(__a )
}
# Retrieves the keys for the output blocks only
UpperCamelCase__ :List[str] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCamelCase__ :Any = {
layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key]
for layer_id in range(__a )
}
for i in range(1 , __a ):
UpperCamelCase__ :Optional[int] = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCamelCase__ :List[Any] = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCamelCase__ :int = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key]
UpperCamelCase__ :Dict = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key]
if f'''input_blocks.{i}.0.op.weight''' in checkpoint:
UpperCamelCase__ :List[Any] = checkpoint[
f'''input_blocks.{i}.0.op.weight'''
]
UpperCamelCase__ :Optional[Any] = checkpoint[
f'''input_blocks.{i}.0.op.bias'''
]
continue
UpperCamelCase__ :List[Any] = renew_resnet_paths(__a )
UpperCamelCase__ :Union[str, Any] = {'''old''': f'''input_blocks.{i}.0''', '''new''': f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
UpperCamelCase__ :Union[str, Any] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path, resnet_op] , config=__a )
if len(__a ):
UpperCamelCase__ :List[Any] = renew_attention_paths(__a )
UpperCamelCase__ :List[str] = {
'''old''': f'''input_blocks.{i}.1''',
'''new''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
UpperCamelCase__ :Optional[Any] = {
f'''input_blocks.{i}.1.qkv.bias''': {
'''key''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'''query''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'''value''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
f'''input_blocks.{i}.1.qkv.weight''': {
'''key''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'''query''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'''value''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=__a , config=__a , )
UpperCamelCase__ :str = middle_blocks[0]
UpperCamelCase__ :List[Any] = middle_blocks[1]
UpperCamelCase__ :Optional[int] = middle_blocks[2]
UpperCamelCase__ :List[str] = renew_resnet_paths(__a )
assign_to_checkpoint(__a , __a , __a , config=__a )
UpperCamelCase__ :Optional[int] = renew_resnet_paths(__a )
assign_to_checkpoint(__a , __a , __a , config=__a )
UpperCamelCase__ :List[str] = renew_attention_paths(__a )
UpperCamelCase__ :int = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
__a , __a , __a , attention_paths_to_split=__a , config=__a )
for i in range(__a ):
UpperCamelCase__ :List[str] = i // (config['''num_res_blocks'''] + 1)
UpperCamelCase__ :Optional[Any] = i % (config['''num_res_blocks'''] + 1)
UpperCamelCase__ :Optional[int] = [shave_segments(__a , 2 ) for name in output_blocks[i]]
UpperCamelCase__ :Optional[int] = {}
for layer in output_block_layers:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = layer.split('''.''' )[0], shave_segments(__a , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__a )
else:
UpperCamelCase__ :Dict = [layer_name]
if len(__a ) > 1:
UpperCamelCase__ :Dict = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key]
UpperCamelCase__ :str = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key]
UpperCamelCase__ :Tuple = renew_resnet_paths(__a )
UpperCamelCase__ :int = renew_resnet_paths(__a )
UpperCamelCase__ :Union[str, Any] = {'''old''': f'''output_blocks.{i}.0''', '''new''': f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCamelCase__ :str = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCamelCase__ :str = checkpoint[
f'''output_blocks.{i}.{index}.conv.weight'''
]
UpperCamelCase__ :List[Any] = checkpoint[
f'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__a ) == 2:
UpperCamelCase__ :str = []
if len(__a ):
UpperCamelCase__ :Optional[int] = renew_attention_paths(__a )
UpperCamelCase__ :Optional[Any] = {
'''old''': f'''output_blocks.{i}.1''',
'''new''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
UpperCamelCase__ :Optional[int] = {
f'''output_blocks.{i}.1.qkv.bias''': {
'''key''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'''query''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'''value''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
f'''output_blocks.{i}.1.qkv.weight''': {
'''key''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'''query''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'''value''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=__a , )
else:
UpperCamelCase__ :Dict = renew_resnet_paths(__a , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCamelCase__ :List[Any] = '''.'''.join(['''output_blocks''', str(__a ), path['''old''']] )
UpperCamelCase__ :Optional[int] = '''.'''.join(['''up_blocks''', str(__a ), '''resnets''', str(__a ), path['''new''']] )
UpperCamelCase__ :int = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__snake_case = parser.parse_args()
__snake_case = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__snake_case = json.loads(f.read())
__snake_case = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__snake_case = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__snake_case = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__snake_case = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__snake_case = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path) | 97 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
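    # Re-split the text into sentences with nltk and put each sentence on its own line.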
    __a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
UpperCamelCase__ :Any = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
UpperCamelCase__ :Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :str = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
UpperCamelCase__ :Optional[Any] = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''lower newer'''
UpperCamelCase__ :Union[str, Any] = '''lower newer'''
return input_text, output_text
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ :Union[str, Any] = '''lower newer'''
UpperCamelCase__ :str = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
UpperCamelCase__ :str = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = tokens + [tokenizer.unk_token]
UpperCamelCase__ :Tuple = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
@require_ftfy
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ :int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Any = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
UpperCamelCase__ :Union[str, Any] = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ :Tuple = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
UpperCamelCase__ :Union[str, Any] = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ :List[str] = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ :Tuple = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCamelCase__ :List[str] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ :Optional[Any] = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ :Dict = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCamelCase__ :List[str] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ :str = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ :Tuple = F'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase__ :Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , )
UpperCamelCase__ :Tuple = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :List[Any] = F''' {text}'''
UpperCamelCase__ :List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , )
UpperCamelCase__ :List[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase_ ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass | 97 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__snake_case = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__snake_case = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__snake_case = os.environ.get('''USER_TOKEN''', '''''')
def a ( __a ) -> dict[Any, Any]:
'''simple docstring'''
UpperCamelCase__ :str = {
'''Authorization''': f'''token {auth_token}''',
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(__a , headers=__a ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''') | 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def a ( __a ) -> datetime:
'''simple docstring'''
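    # Gauss's Easter algorithm: derive the Gregorian Easter date from the year's lunar and leap-year cycles.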
UpperCamelCase__ :Tuple = year % 19
UpperCamelCase__ :Optional[Any] = year % 4
UpperCamelCase__ :Dict = year % 7
UpperCamelCase__ :Union[str, Any] = math.floor(year / 100 )
UpperCamelCase__ :List[str] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
UpperCamelCase__ :List[str] = leap_day_inhibits / 4
UpperCamelCase__ :Union[str, Any] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
UpperCamelCase__ :List[str] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
UpperCamelCase__ :List[Any] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
UpperCamelCase__ :Optional[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__a , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__a , 4 , 18 )
else:
return datetime(__a , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
__snake_case = '''will be''' if year > datetime.now().year else '''was'''
print(F"""Easter in {year} {tense} {gauss_easter(year)}""") | 97 |
'''simple docstring'''
def a ( __a , __a ) -> int:
'''simple docstring'''
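    # Hamming distance: count the positions at which two equal-length strings differ.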
if len(__a ) != len(__a ):
raise ValueError('''String lengths must match!''' )
UpperCamelCase__ :Union[str, Any] = 0
    for chara, charb in zip(__a , __a ):
        if chara != charb:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
UpperCamelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' )
UpperCamelCase__ :List[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
UpperCamelCase__ :Union[str, Any] = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
UpperCamelCase__ :Union[str, Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ ).loss
UpperCamelCase__ :Union[str, Any] = -tf.math.reduce_mean(UpperCamelCase_ ).numpy()
UpperCamelCase__ :Tuple = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 ) | 97 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |