code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map an original GroupViT checkpoint parameter name to its HF equivalent.

    Applies a sequence of ordered substring replacements covering the vision
    encoder, the text encoder, and the two projection heads. The replacement
    order matters (e.g. ``attn`` must become ``self_attn`` before the ``proj``
    rule, which requires ``self_attn`` in the name).

    Args:
        name: parameter name from the original GroupViT checkpoint.

    Returns:
        The corresponding parameter name in the HF GroupViTModel state dict.
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    """Re-key a GroupViT research checkpoint state dict in place to HF naming.

    Fused attention projections are split: the vision encoder's ``qkv`` and
    the text encoder's ``in_proj`` tensors each become separate q/k/v
    projection tensors. All remaining keys are renamed via ``rename_key``.

    Args:
        orig_state_dict: state dict loaded from the original checkpoint.
        config: GroupViTConfig providing the vision/text hidden sizes.

    Returns:
        The same dict object, re-keyed for ``GroupViTModel.load_state_dict``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of the
            # vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # same split for the text encoder's fused in_proj attention tensors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze projection tensors stored with trailing singleton dims
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                val = val.squeeze_()
            orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Download and return the standard COCO cats test image (PIL.Image).

    Used to verify that a converted model reproduces reference logits.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read straight from the response raw stream
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Convert an original GroupViT checkpoint to the HF format.

    Loads the research checkpoint, remaps its state dict into a fresh
    ``GroupViTModel``, verifies the logits on the COCO cats image against
    reference values, then saves (and optionally pushes) model + processor.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for model and processor.
        model_name: one of ``groupvit-gcc-yfcc`` / ``groupvit-gcc-redcaps``.
        push_to_hub: whether to push the converted artifacts to the Hub.

    Raises:
        ValueError: if ``model_name`` has no reference logits.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    # position_ids is a recomputed buffer; multi_label_logit_scale is unused by the HF model
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
    )

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(F"""Model name {model_name} not supported.""")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
    parser.add_argument(
        '--model_name',
        # BUG FIX: default was 'groupvit-gccy-fcc', which is not a supported
        # name and would always raise ValueError during verification.
        default='groupvit-gcc-yfcc',
        type=str,
        help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
if __name__ == "__main__":
    import doctest

    # Run any embedded doctests when this module is executed directly.
    doctest.testmod()
| 640
| 1
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

# Standard vocab resource names; read by the tokenizer class below.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

# Hub locations of the pretrained ESM-2 vocab files.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}

# Maximum input sizes (positional embeddings) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1024,
    'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    """Read a newline-separated vocab file and return the stripped tokens.

    Args:
        vocab_file: path to a text file with one token per line.

    Returns:
        List of tokens with surrounding whitespace removed.
    """
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class _UpperCAmelCase ( snake_case ):
    """Vocab-file-backed tokenizer (ESM-style): whitespace tokenization over a
    fixed token list loaded from ``vocab.txt``.

    NOTE(review): this block looks machine-obfuscated. The four class
    attributes all share the (mangled) name ``__lowerCamelCase`` and most
    methods share the name ``lowerCAmelCase__``, so each later definition
    shadows the earlier ones, and ``__init__`` declares several parameters
    all named ``a`` (a SyntaxError). Confirm against the original source
    before relying on any behavior here.
    """

    # Per their values these are presumably vocab_files_names,
    # pretrained_vocab_files_map, max positional-embedding sizes, and
    # model_input_names — TODO confirm against the original.
    __lowerCamelCase: List[str] = VOCAB_FILES_NAMES
    __lowerCamelCase: Any = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase: Any = ['input_ids', 'attention_mask']

    def __init__( self : Tuple , a : str , a : List[Any]="<unk>" , a : List[Any]="<cls>" , a : Tuple="<pad>" , a : int="<mask>" , a : List[Any]="<eos>" , **a : Dict , ):
        """Load the vocab file and build the id<->token maps.

        NOTE(review): the ``lowercase_`` targets below shadow one another and
        the names read later (``self.all_tokens``, ``unk_token`` …) are never
        bound — an artifact of automated renaming.
        """
        super().__init__(**a )
        lowercase_ : List[Any] = load_vocab_file(a )
        lowercase_ : Tuple = dict(enumerate(self.all_tokens ) )  # id -> token
        lowercase_ : Union[str, Any] = {tok: ind for ind, tok in enumerate(self.all_tokens )}  # token -> id
        lowercase_ : Any = unk_token
        lowercase_ : List[Any] = cls_token
        lowercase_ : str = pad_token
        lowercase_ : str = mask_token
        lowercase_ : List[str] = eos_token
        lowercase_ : Optional[int] = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def lowerCAmelCase__ ( self : int , a : int ):
        """Convert an id to its token, falling back to the unk token."""
        return self._id_to_token.get(a , self.unk_token )

    def lowerCAmelCase__ ( self : Optional[int] , a : str ):
        """Convert a token to its id, falling back to the unk token's id."""
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )

    def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[Any] , **a : Union[str, Any] ):
        """Tokenize by plain whitespace splitting."""
        return text.split()

    def lowerCAmelCase__ ( self : Optional[Any] , a : List[str]=False ):
        """Return the size of the base vocabulary."""
        return len(self._id_to_token )

    def lowerCAmelCase__ ( self : Tuple ):
        """Return the token -> id mapping for the base vocabulary."""
        return {token: i for i, token in enumerate(self.all_tokens )}

    def lowerCAmelCase__ ( self : int , a : str ):
        """token -> id (duplicate of the converter above)."""
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )

    def lowerCAmelCase__ ( self : Dict , a : int ):
        """id -> token (duplicate of the converter above)."""
        return self._id_to_token.get(a , self.unk_token )

    def lowerCAmelCase__ ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ):
        """Add special tokens: cls + seq + eos (and a second segment if given)."""
        lowercase_ : int = [self.cls_token_id]
        lowercase_ : Any = [self.eos_token_id] # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token

    def lowerCAmelCase__ ( self : Dict , a : List , a : Optional[List] = None , a : bool = False ):
        """Return a mask: 1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        # leading 1 for cls, trailing 1 for eos
        lowercase_ : Dict = [1] + ([0] * len(a )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(a ) + [1]
        return mask

    def lowerCAmelCase__ ( self : List[str] , a : Union[str, Any] , a : Optional[Any] ):
        """Write the vocabulary to ``[<prefix>-]vocab.txt`` in the directory."""
        lowercase_ : List[str] = os.path.join(a , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(a , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def lowerCAmelCase__ ( self : Dict ):
        """Vocabulary-size property (note: `a` is unbound here — obfuscation artifact)."""
        return self.get_vocab_size(with_added_tokens=a )

    def lowerCAmelCase__ ( self : int , a : Union[List[str], List[AddedToken]] , a : bool = False ):
        """Delegate token addition to the base class."""
        return super()._add_tokens(a , special_tokens=a )
| 640
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
    # NOTE(review): reads like an obfuscated Enum of five scheduler variants —
    # every member is bound to the same (name-mangled) attribute
    # `__lowerCamelCase`, so only the last value (5) survives. Confirm the
    # original member names before use.
    __lowerCamelCase: int = 1
    __lowerCamelCase: List[Any] = 2
    __lowerCamelCase: Optional[Any] = 3
    __lowerCamelCase: int = 4
    __lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
    # Scheduler output container holding a single JAX array (presumably the
    # predicted previous sample — confirm against the original source).
    __lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
    """Base mixin for Flax schedulers: config load/save plus lookup of
    compatible scheduler classes.

    The original block declared several parameters all named ``a`` (a
    SyntaxError) and bound every local to ``lowercase_``; names are restored
    here from their read sites.
    """

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state) from a saved config.

        Returns ``(scheduler, state)`` or ``(scheduler, state, unused_kwargs)``
        when ``return_unused_kwargs`` is True.
        """
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        # stateful Flax schedulers expose create_state()/has_state
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Save the scheduler config to a directory (optionally push to Hub)."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this scheduler is compatible with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        """Resolve compatible class names against the top-level package."""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    """Broadcast ``x`` to ``shape``, aligning x's dims with shape's LEADING dims.

    Trailing singleton axes are appended to ``x`` so that, e.g., a vector of
    per-timestep scalars broadcasts across the remaining sample dimensions.

    Args:
        x: JAX array with ``x.ndim <= len(shape)``.
        shape: target shape whose leading dims match ``x.shape``.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
@flax.struct.dataclass
class _UpperCAmelCase:
    """State shared by Flax schedulers: the beta schedule and derived alphas."""

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the state from a scheduler's config (beta schedule choice).

        Raises:
            NotImplementedError: for an unknown ``beta_schedule`` value.
        """
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""")

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) for ``timesteps``.

    Both factors are flattened and broadcast (from the left) to the shape of
    ``original_samples`` so they can scale samples and noise elementwise.

    Returns:
        Tuple ``(sqrt_alpha_prod, sqrt_one_minus_alpha_prod)``.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse: x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """Velocity target: v_t = sqrt(a_bar_t) * eps - sqrt(1 - a_bar_t) * x_t."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 640
| 1
|
'''simple docstring'''
def count_divisors(n):
    """Return the number of divisors of ``n`` via prime factorization.

    For n = p1^e1 * ... * pk^ek the divisor count is (e1+1) * ... * (ek+1).
    Trial-divides by every i with i*i <= n; any remainder > 1 is a prime
    with multiplicity 1 (contributing a factor of 2).

    Args:
        n: positive integer.

    Returns:
        Number of positive divisors of ``n``.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # leftover prime factor with exponent 1
        n_divisors *= 2
    return n_divisors
def solution():
    """Return the first triangle number with more than 500 divisors.

    Iterates the triangle numbers t_num = 1 + 2 + ... + i, testing each
    with ``count_divisors`` (Project Euler problem 12).
    """
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
    # Print the first triangle number with more than 500 divisors.
    print(solution())
| 640
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph):
    """Greedy APX algorithm for minimum vertex cover.

    Repeatedly picks the vertex of highest remaining degree, adds it to the
    cover, and removes its incident edges. Note: the input adjacency lists
    are mutated in the process.

    Args:
        graph: dict mapping each vertex to the list of its neighbours.

    Returns:
        Set of vertices forming a (not necessarily minimum) vertex cover.
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo graph: print a greedy vertex cover for a small 5-node graph.
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase(unittest.TestCase):
    """Slow integration test for DiT fine-tuned on RVL-CDIP (16 classes)."""

    @slow
    def test_inference_image_classification(self):
        """Forward one demo document image and check logits shape + slice.

        NOTE: renamed from an obfuscated non-`test_` name so unittest
        discovery actually runs it.
        """
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 640
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate

# Emit a deprecation warning at import time, pointing callers to the new
# location (diffusers.pipelines.pipeline_utils). stacklevel=3 attributes the
# warning to the importing module rather than this shim.
deprecate(
    'pipelines_utils',
    '0.22.0',
    'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
    standard_warn=False,
    stacklevel=3,
)
| 640
| 1
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class _UpperCAmelCase(unittest.TestCase):
    """Unit tests for the greedy knapsack implementation (``kp.calc_profit``).

    NOTE: the obfuscated original gave every method the same non-``test_``
    name (so unittest discovered none of them) and passed an undefined name
    as the expected exception class; both are restored here.
    """

    def test_sorted(self):
        """calc_profit returns the maximum achievable profit (210 here)."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """A negative max_weight must be rejected."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """A negative weight entry must be rejected."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """A negative profit entry must be rejected."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """A zero max_weight must be rejected."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Profit and weight lists of different lengths must be rejected."""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| 640
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels per GLUE task; used below to size the
# sequence-classification head of the converted XLNet model.
GLUE_TASKS_NUM_LABELS = {
    'cola': 2,
    'mnli': 3,
    'mrpc': 2,
    'sst-2': 2,
    'sts-b': 1,
    'qqp': 2,
    'qnli': 2,
    'rte': 2,
    'wnli': 2,
}

logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model and save it.

    Picks the model head from the finetuning task: a classification head for
    GLUE tasks, a QA head for SQuAD-style tasks, otherwise the LM head.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        bert_config_file: path to the XLNet config JSON.
        pytorch_dump_folder_path: output directory for weights and config.
        finetuning_task: optional task name (e.g. 'sts-b', 'squad').
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--xlnet_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained XLNet model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--finetuning_task',
        default=None,
        type=str,
        help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 640
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCAmelCase :
    """Model tester that builds a tiny Mask2Former config plus random inputs.

    NOTE(review): machine-obfuscated — several defs declare multiple
    parameters all named ``a`` (a SyntaxError), assignment targets were
    rewritten to ``lowercase_`` so names read later (``self.batch_size``,
    ``config`` …) are never bound, and indentation was stripped (the
    block scoping restored here is a best-effort reconstruction). Confirm
    against the original source before relying on this block.
    """

    def __init__( self : Union[str, Any] , a : List[Any] , a : Dict=2 , a : int=True , a : Optional[Any]=False , a : str=1_0 , a : Union[str, Any]=3 , a : str=3_2 * 8 , a : Tuple=3_2 * 8 , a : Optional[int]=4 , a : List[str]=6_4 , ):
        """Store tester hyper-parameters (batch size, image size, labels, …)."""
        lowercase_ : Dict = parent
        lowercase_ : List[str] = batch_size
        lowercase_ : Dict = is_training
        lowercase_ : Tuple = use_auxiliary_loss
        lowercase_ : List[Any] = num_queries
        lowercase_ : Any = num_channels
        lowercase_ : Optional[int] = min_size
        lowercase_ : Dict = max_size
        lowercase_ : Tuple = num_labels
        lowercase_ : Optional[int] = hidden_dim
        # second hidden_dim binding — presumably mask_feature_size; confirm
        lowercase_ : Optional[int] = hidden_dim

    def lowerCAmelCase__ ( self : Optional[int] ):
        """Create the config plus random pixel values, mask, and labels."""
        lowercase_ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            a )
        lowercase_ : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a )
        # random binary mask labels, one mask per label
        lowercase_ : str = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a ) > 0.5
        ).float()
        lowercase_ : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=a ) > 0.5).long()
        lowercase_ : Union[str, Any] = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowerCAmelCase__ ( self : Dict ):
        """Build a small Mask2FormerConfig so the tests run fast."""
        lowercase_ : List[Any] = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        lowercase_ : Optional[Any] = self.num_queries
        lowercase_ : int = self.num_labels
        # [1, 1, 1, 1] — presumably backbone depths; confirm upstream
        lowercase_ : Tuple = [1, 1, 1, 1]
        lowercase_ : Tuple = self.num_channels
        lowercase_ : Dict = 6_4
        lowercase_ : Union[str, Any] = 1_2_8
        lowercase_ : Tuple = self.hidden_dim
        lowercase_ : Optional[Any] = self.hidden_dim
        lowercase_ : List[Any] = self.hidden_dim
        return config

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """Return (config, inputs_dict) for the common model tests."""
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = self.prepare_config_and_inputs()
        lowercase_ : Tuple = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
        """Check lengths of the three hidden-state tuples in the output."""
        lowercase_ : int = output.encoder_hidden_states
        lowercase_ : List[Any] = output.pixel_decoder_hidden_states
        lowercase_ : Optional[int] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(a ) , config.decoder_layers )

    def lowerCAmelCase__ ( self : int , a : str , a : Dict , a : Dict , a : Dict=False ):
        """Forward MaskaFormerModel and verify output shapes/presence."""
        with torch.no_grad():
            lowercase_ : str = MaskaFormerModel(config=a )
            model.to(a )
            model.eval()
            lowercase_ : str = model(pixel_values=a , pixel_mask=a )
            lowercase_ : List[str] = model(a , output_hidden_states=a )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(a , a )

    def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any] , a : str , a : Tuple , a : str , a : Dict ):
        """Forward the universal-segmentation head with and without labels."""
        lowercase_ : List[str] = MaskaFormerForUniversalSegmentation(config=a )
        model.to(a )
        model.eval()

        def comm_check_on_output(a : Union[str, Any] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowercase_ : Optional[Any] = model(pixel_values=a , pixel_mask=a )
            lowercase_ : str = model(a )

            comm_check_on_output(a )

            lowercase_ : int = model(
                pixel_values=a , pixel_mask=a , mask_labels=a , class_labels=a )

            comm_check_on_output(a )

        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _UpperCAmelCase ( snake_case , snake_case , unittest.TestCase ):
    """Common test-suite class for Mask2Former models.

    NOTE(review): every class attribute is named ``__lowerCamelCase`` and every
    test method ``lowerCAmelCase__`` — later definitions silently override
    earlier ones, and several bodies read the undefined name ``a``.  This looks
    like an artifact of automated identifier mangling; the original distinct
    names should be restored for the test mixin to discover everything.
    """

    # Model classes exercised by the common mixin (presumably ``all_model_classes``).
    __lowerCamelCase: int = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    # Presumably ``pipeline_model_mapping``.
    __lowerCamelCase: Any = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    # Mixin feature flags, all disabled; original names not recoverable from here.
    __lowerCamelCase: Dict = False
    __lowerCamelCase: int = False
    __lowerCamelCase: Tuple = False
    __lowerCamelCase: Dict = False

    def lowerCAmelCase__ ( self : Any ):
        """Create the model tester and config tester used by the tests."""
        lowercase_ : Optional[int] = MaskaFormerModelTester(self )
        # NOTE(review): ``a`` is undefined here — presumably
        # ``config_class=MaskaFormerConfig, has_text_modality=False``.
        lowercase_ : Dict = ConfigTester(self , config_class=a , has_text_modality=a )

    def lowerCAmelCase__ ( self : Dict ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase__ ( self : Dict ):
        """Exercise the base model through the model tester."""
        lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(a , **a , output_hidden_states=a )

    def lowerCAmelCase__ ( self : Any ):
        """Exercise the universal-segmentation head through the model tester."""
        lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*a )

    @unittest.skip(reason="Mask2Former does not use inputs_embeds" )
    def lowerCAmelCase__ ( self : Dict ):
        """Skipped: the model consumes pixel values, not input embeddings."""
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        """Skipped: no input-embedding accessor on this architecture."""
        pass

    @unittest.skip(reason="Mask2Former is not a generative model" )
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """Skipped: generation tests do not apply."""
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings" )
    def lowerCAmelCase__ ( self : Dict ):
        """Skipped: token-embedding resizing tests do not apply."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def lowerCAmelCase__ ( self : List[Any] ):
        """Skipped on multi-GPU: incompatible with nn.DataParallel."""
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Skipped pending a smaller common-test model."""
        pass

    def lowerCAmelCase__ ( self : List[Any] ):
        """The forward signature of every model must start with ``pixel_values``."""
        lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : List[Any] = model_class(a )
            lowercase_ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : Any = [*signature.parameters.keys()]
            lowercase_ : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , a )

    @slow
    def lowerCAmelCase__ ( self : Tuple ):
        """Smoke-test loading a pretrained checkpoint from the Hub."""
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            lowercase_ : Union[str, Any] = MaskaFormerModel.from_pretrained(a )
            self.assertIsNotNone(a )

    def lowerCAmelCase__ ( self : Tuple ):
        """A forward pass with random labels must produce a loss."""
        lowercase_ : Union[str, Any] = (self.model_tester.min_size,) * 2
        # NOTE(review): ``device=a`` — ``a`` is undefined, presumably torch_device.
        lowercase_ : Tuple = {
            "pixel_values": torch.randn((2, 3, *size) , device=a ),
            "mask_labels": torch.randn((2, 1_0, *size) , device=a ),
            "class_labels": torch.zeros(2 , 1_0 , device=a ).long(),
        }
        lowercase_ : str = self.model_tester.get_config()
        lowercase_ : List[Any] = MaskaFormerForUniversalSegmentation(a ).to(a )
        lowercase_ : List[Any] = model(**a )
        self.assertTrue(outputs.loss is not None )

    def lowerCAmelCase__ ( self : List[str] ):
        """Exercise the base model with hidden-state outputs enabled."""
        lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(a , **a , output_hidden_states=a )

    def lowerCAmelCase__ ( self : Optional[Any] ):
        """Attention maps must be returned when ``output_attentions`` is set."""
        lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Dict = model_class(a ).to(a )
            lowercase_ : Optional[int] = model(**a , output_attentions=a )
            self.assertTrue(outputs.attentions is not None )

    def lowerCAmelCase__ ( self : Optional[Any] ):
        """The segmentation head must be trainable: ``loss.backward()`` succeeds."""
        if not self.model_tester.is_training:
            return
        # Index 1 is MaskaFormerForUniversalSegmentation (the head model).
        lowercase_ : Optional[Any] = self.all_model_classes[1]
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        lowercase_ : Union[str, Any] = model_class(a )
        model.to(a )
        model.train()
        lowercase_ : str = model(a , mask_labels=a , class_labels=a ).loss
        loss.backward()

    def lowerCAmelCase__ ( self : List[str] ):
        """Gradients must reach every intermediate output (retain_grad checks)."""
        lowercase_ : Optional[int] = self.all_model_classes[1]
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        # Presumably output_hidden_states / output_attentions toggles.
        lowercase_ : str = True
        lowercase_ : Dict = True
        lowercase_ : Optional[Any] = model_class(a ).to(a )
        model.train()
        lowercase_ : Tuple = model(a , mask_labels=a , class_labels=a )
        lowercase_ : int = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowercase_ : Optional[int] = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        lowercase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowercase_ : Optional[Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=a )
        # Every retained intermediate must have received a gradient.
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
UpperCamelCase__ = 1e-4
def __SCREAMING_SNAKE_CASE():
    """Load the COCO cats fixture image used by the slow integration tests.

    Bug fixed: the original assigned the opened image to ``lowercase_`` but
    returned the undefined name ``image`` (NameError); the function now
    returns the image it actually opens.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests against the real Mask2Former COCO checkpoint.

    NOTE(review): several calls below pass the undefined name ``a`` where a
    device (presumably ``torch_device``) or the module-level tolerance
    constant is clearly intended — an identifier-mangling artifact.
    """

    @cached_property
    def lowerCAmelCase__ ( self : int ):
        """Hub id of the checkpoint under test (referenced as ``model_checkpoints``)."""
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def lowerCAmelCase__ ( self : List[Any] ):
        """Image processor for the checkpoint, or None without vision deps."""
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    def lowerCAmelCase__ ( self : Dict ):
        """Headless model: compare hidden-state slices against reference values."""
        lowercase_ : Optional[int] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(a )
        lowercase_ : Tuple = self.default_image_processor
        lowercase_ : str = prepare_img()
        lowercase_ : List[Any] = image_processor(a , return_tensors="pt" ).to(a )
        lowercase_ : Optional[Any] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(a , (1, 3, 3_8_4, 3_8_4) )
        with torch.no_grad():
            lowercase_ : int = model(**a )
        # Reference slice of the encoder output.
        lowercase_ : Dict = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(a )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) )
        # Reference slice of the pixel-decoder output.
        lowercase_ : Optional[Any] = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(a )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) )
        # Reference slice of the transformer-decoder output.
        lowercase_ : List[Any] = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(a )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a , atol=a ) )

    def lowerCAmelCase__ ( self : List[Any] ):
        """Segmentation head: check mask/class logits shapes and reference values."""
        lowercase_ : Optional[int] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a ).eval()
        lowercase_ : Optional[int] = self.default_image_processor
        lowercase_ : int = prepare_img()
        lowercase_ : Any = image_processor(a , return_tensors="pt" ).to(a )
        lowercase_ : List[Any] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(a , (1, 3, 3_8_4, 3_8_4) )
        with torch.no_grad():
            lowercase_ : Tuple = model(**a )
        # masks_queries_logits
        lowercase_ : Any = outputs.masks_queries_logits
        # Masks are predicted at 1/4 of the input's spatial resolution.
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        lowercase_ : Optional[Any] = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        lowercase_ : List[Any] = torch.tensor(a ).to(a )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a , atol=a ) )
        # class_queries_logits
        lowercase_ : Optional[Any] = outputs.class_queries_logits
        # num_labels + 1 accounts for the "no object" (null) class.
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        lowercase_ : Union[str, Any] = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(a )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=a ) )

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """Processor-produced segmentation maps must yield a training loss."""
        lowercase_ : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a ).eval()
        lowercase_ : Optional[int] = self.default_image_processor
        lowercase_ : List[Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
        lowercase_ : Union[str, Any] = inputs["pixel_values"].to(a )
        lowercase_ : int = [el.to(a ) for el in inputs["mask_labels"]]
        lowercase_ : Optional[int] = [el.to(a ) for el in inputs["class_labels"]]
        with torch.no_grad():
            lowercase_ : Optional[Any] = model(**a )
        self.assertTrue(outputs.loss is not None )
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 640
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Convert a Mesh-TensorFlow GPTSAN checkpoint into a PyTorch state dict.

    NOTE(review): the local variable names in this function appear machine
    mangled — every assignment targets ``lowercase_`` while later lines read
    ``args``/``params``/``parameter_file``/``reader``/``shapes``/``vnp``/
    ``player``/``nlayer``/``state``/``state_q``/``state_k``/``state_v``,
    which are never assigned here.  As written the function raises NameError
    at runtime unless those names exist as globals; the reads indicate the
    name each assignment was meant to bind, so only the bindings need
    restoring.  Left byte-identical pending that restoration.
    """
    # Path of the checkpoint's hyper-parameter file.
    lowercase_ : Any = os.path.join(args.tf_model_dir , "parameters.json" )
    lowercase_ : str = json.loads(open(_UpperCamelCase ).read() )
    if not params:
        raise ValueError(
            F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
    # Ensure the output path has a .pt extension.
    if not args.output.endswith(".pt" ):
        lowercase_ : str = args.output + ".pt"
    # The PyTorch state dict being assembled.
    lowercase_ : Any = OrderedDict()
    with tf.device("/CPU:0" ):
        lowercase_ : str = tf.train.load_checkpoint(args.tf_model_dir )
        lowercase_ : Any = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            lowercase_ : Optional[Any] = reader.get_tensor(_UpperCamelCase ).astype(np.floataa )
            # Skip Adam optimizer slots; only model weights are converted.
            if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
                continue
            if key_name.startswith("pasts/" ):
                if key_name.startswith("pasts/mlp" ):
                    lowercase_ : Optional[int] = int(key_name[9] )
                elif key_name.startswith("pasts/out" ):
                    lowercase_ : Tuple = 8
                lowercase_ : str = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                lowercase_ : Optional[int] = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                lowercase_ : Optional[int] = torch.tensor(_UpperCamelCase )
            elif key_name.startswith("model/moe" ):
                # Mixture-of-experts block weights.
                lowercase_ : Dict = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/switch_gating/kernel" ):
                    lowercase_ : Optional[Any] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    lowercase_ : List[str] = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    lowercase_ : Tuple = torch.tensor(_UpperCamelCase )
                elif key_name.endswith("/softmlp/kernel" ):
                    lowercase_ : str = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    lowercase_ : str = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    lowercase_ : Any = torch.tensor(_UpperCamelCase )
                elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
                    lowercase_ : List[str] = key_name[-9:-7]
                    # One fused array holds all 16 experts; split per expert.
                    for i in range(16 ):
                        lowercase_ : Optional[int] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        lowercase_ : Optional[Any] = (
                            vnp[i].transpose([1, 0] ).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        lowercase_ : Optional[int] = torch.tensor(_UpperCamelCase )
            elif key_name.startswith("model/mlp" ):
                # Dense feed-forward block weights.
                lowercase_ : Dict = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/p1/kernel" ):
                    lowercase_ : List[Any] = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    lowercase_ : Tuple = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    lowercase_ : Tuple = torch.tensor(_UpperCamelCase )
                elif key_name.endswith("/p1/bias" ):
                    lowercase_ : Optional[int] = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    lowercase_ : Tuple = vnp.copy()  # same because it is one dimensional
                    lowercase_ : List[str] = torch.tensor(_UpperCamelCase )
                elif key_name.endswith("/p2/kernel" ):
                    lowercase_ : Optional[int] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    lowercase_ : List[str] = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    lowercase_ : Tuple = torch.tensor(_UpperCamelCase )
                elif key_name.endswith("/p2/bias" ):
                    lowercase_ : Tuple = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    lowercase_ : Dict = vnp.copy()  # same because it is one dimensional
                    lowercase_ : Dict = torch.tensor(_UpperCamelCase )
            elif key_name.startswith("model/ln" ):
                # Feed-forward layer norm.
                lowercase_ : Tuple = int(key_name[8:].split("/" )[0] )
                if key_name.endswith("/b" ):
                    lowercase_ : Dict = "model.blocks.%d.feed_forward.norm.bias" % player
                    lowercase_ : Optional[int] = vnp.copy()  # same because it is one dimensional
                    lowercase_ : Union[str, Any] = torch.tensor(_UpperCamelCase )
                elif key_name.endswith("/g" ):
                    lowercase_ : List[str] = "model.blocks.%d.feed_forward.norm.weight" % player
                    lowercase_ : Dict = vnp.copy()  # same because it is one dimensional
                    lowercase_ : List[Any] = torch.tensor(_UpperCamelCase )
            elif key_name.startswith("model/att" ):
                # Self-attention weights.
                lowercase_ : List[str] = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/qkv/kernel" ):
                    lowercase_ : Optional[Any] = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    # The fused QKV kernel is split along axis 1 into Q, K, V.
                    lowercase_ : Union[str, Any] = state[:, 0, :, :]
                    lowercase_ : int = state[:, 1, :, :]
                    lowercase_ : Any = state[:, 2, :, :]
                    lowercase_ : List[Any] = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    lowercase_ : Dict = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    lowercase_ : Union[str, Any] = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    lowercase_ : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    lowercase_ : Dict = torch.tensor(_UpperCamelCase )
                    lowercase_ : Any = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    lowercase_ : Optional[int] = torch.tensor(_UpperCamelCase )
                    lowercase_ : Tuple = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    lowercase_ : int = torch.tensor(_UpperCamelCase )
                elif key_name.endswith("/o/kernel" ):
                    lowercase_ : List[str] = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    lowercase_ : List[Any] = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    lowercase_ : Optional[int] = torch.tensor(_UpperCamelCase )
            elif key_name.startswith("model/an" ):
                # Self-attention layer norm.
                lowercase_ : str = int(key_name[8:].split("/" )[0] )
                if key_name.endswith("/b" ):
                    lowercase_ : str = "model.blocks.%d.self_attn.norm.bias" % player
                    lowercase_ : str = vnp.copy()  # same because it is one dimensional
                    lowercase_ : int = torch.tensor(_UpperCamelCase )
                elif key_name.endswith("/g" ):
                    lowercase_ : List[Any] = "model.blocks.%d.self_attn.norm.weight" % player
                    lowercase_ : Dict = vnp.copy()  # same because it is one dimensional
                    lowercase_ : Optional[Any] = torch.tensor(_UpperCamelCase )
            elif (
                key_name.startswith("model/wte" )
                or key_name.startswith("model/wpe" )
                or key_name.startswith("model/ete" )
            ):
                # Token / position / extra-position embedding tables.
                lowercase_ : Tuple = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                lowercase_ : Tuple = "model.%s.weight" % nlayer
                lowercase_ : Tuple = vnp.copy()  # same in embedded
                lowercase_ : Any = torch.tensor(_UpperCamelCase )
                if key_name.startswith("model/wte" ):
                    # Token embeddings are tied to the LM head.
                    lowercase_ : Any = "lm_head.weight"
                    lowercase_ : List[Any] = vnp.copy()  # same in embedded
                    lowercase_ : int = torch.tensor(_UpperCamelCase )
            elif key_name.startswith("model/wob" ):
                lowercase_ : Any = "final_logits_bias"
                lowercase_ : Any = vnp.copy()  # same in embedded
                lowercase_ : Optional[Any] = state.reshape((1, -1) )
                lowercase_ : Any = torch.tensor(_UpperCamelCase )
            elif key_name == "model/dense/kernel":
                lowercase_ : Optional[Any] = "model.last_project.weight"
                lowercase_ : str = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                lowercase_ : int = torch.tensor(_UpperCamelCase )
            elif key_name == "model/dense_1/bias":
                lowercase_ : List[Any] = "model.last_project.bias"
                lowercase_ : Any = vnp.copy()  # same because it is one dimensional
                lowercase_ : int = torch.tensor(_UpperCamelCase )
    # Persist the converted state dict.
    torch.save(_UpperCamelCase , args.output )
if __name__ == "__main__":
    # Command-line entry point: parse the checkpoint dir / output path, then convert.
    #
    # Bugs fixed: the parser was assigned to ``UpperCamelCase__`` while the
    # following lines read the undefined names ``parser`` and ``args``, and the
    # final call targeted ``convert_tf_gptsan_to_pt`` which is not defined in
    # this module (the converter is ``__SCREAMING_SNAKE_CASE`` above).
    parser = argparse.ArgumentParser(
        description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
    parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
    __SCREAMING_SNAKE_CASE(args)
| 640
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece vocabulary fixture used by the unit tests.
# NOTE(review): all three module constants below are assigned to the same name
# ``UpperCamelCase__`` (later ones override earlier) — mangling artifact; the
# tests reference EN_CODE / PYTHON_CODE, which these ids evidently were.
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCamelCase__ = 50003  # presumably EN_CODE (__en_XX__, see assertions below)
UpperCamelCase__ = 50002  # presumably PYTHON_CODE (__python__)
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
    """Unit tests for ``PLBartTokenizer`` built on the SentencePiece fixture.

    NOTE(review): identifier-mangling artifacts throughout — assignments
    target ``lowercase_`` while later lines read the intended names
    (``tokenizer``, ``end``, ``src_text`` ...), and constructors receive the
    undefined name ``a`` where the fixture path / keep_accents=True belong.
    """

    # Mixin configuration (presumably tokenizer_class / rust_tokenizer_class / test flag).
    __lowerCamelCase: Optional[int] = PLBartTokenizer
    __lowerCamelCase: Any = None
    __lowerCamelCase: Dict = False

    def lowerCAmelCase__ ( self : int ):
        """Save a fixture-backed tokenizer into the temp dir for the mixin."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase__ ( self : Optional[Any] ):
        """Tokenize/convert round-trip for the "base" language-code set."""
        lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        # Converting back: out-of-vocab pieces come back as "<unk>".
        lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        # The last four ids of the "base" vocab are the language codes + mask.
        lowercase_ : Dict = tokenizer.vocab_size
        lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
        self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
        lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : str = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )

    def lowerCAmelCase__ ( self : int ):
        """Same round-trip for the "multi" language-code set (seven codes)."""
        lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        # The "multi" vocab ends with all seven language codes.
        lowercase_ : Dict = tokenizer.vocab_size
        lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
        self.assertListEqual(
            a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
        lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : List[Any] = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    """Integration tests against the real ``uclanlp/plbart-python-en_XX`` tokenizer.

    NOTE(review): identifier-mangling artifacts — the class attributes are all
    named ``__lowerCamelCase`` (later definitions override earlier ones), and
    method bodies read names (``src_text``, ``generated_ids``, ``batch``,
    ``ids``, ``targets``, ``new_tok`` ...) never assigned because assignments
    target ``lowercase_``; ``a`` is read where booleans/constants belong.
    """

    # Presumably: checkpoint_name, src_text, tgt_text, expected_src_tokens.
    __lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
    __lowerCamelCase: Tuple = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    __lowerCamelCase: List[str] = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    __lowerCamelCase: List[str] = [
        134,
        5452,
        3_3460,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        988,
        20,
        3_3456,
        19,
        3_3456,
        771,
        39,
        4258,
        889,
        3318,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def lowerCAmelCase__ ( cls : str ):
        """Load the pretrained tokenizer once for the whole test class."""
        lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        # Presumably the pad_token_id expected in decoder inputs.
        lowercase_ : List[str] = 1
        return cls

    def lowerCAmelCase__ ( self : int ):
        """Language-code ids must sit right after the base vocab."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )

    def lowerCAmelCase__ ( self : int ):
        """Encoding the first source snippet must match the golden ids."""
        lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , a )

    def lowerCAmelCase__ ( self : Tuple ):
        """Decoding must drop special tokens (incl. the leading language code)."""
        self.assertIn(a , self.tokenizer.all_special_ids )
        lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
        lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
        self.assertEqual(a , a )
        self.assertNotIn(self.tokenizer.eos_token , a )

    def lowerCAmelCase__ ( self : Dict ):
        """Truncation keeps the trailing EOS + language code."""
        lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
        self.assertIsInstance(src_text[0] , a )
        lowercase_ : Tuple = 1_0
        lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , a )
        self.assertEqual(len(a ) , a )

    def lowerCAmelCase__ ( self : Dict ):
        """<mask> and __java__ must map to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )

    def lowerCAmelCase__ ( self : str ):
        """Save/reload must preserve the fairseq special-token table."""
        lowercase_ : Optional[int] = tempfile.mkdtemp()
        lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(a )
        lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )

    @require_torch
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Batched src/tgt encoding matches the fairseq reference layout."""
        lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
        lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , a )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )

    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        """Shapes and prefix/suffix bookkeeping after padding + truncation."""
        lowercase_ : Union[str, Any] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(a , a )
        self.assertEqual((2, 2_6) , batch.input_ids.shape )
        self.assertEqual((2, 2_6) , batch.attention_mask.shape )
        lowercase_ : Dict = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , a )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )

    def lowerCAmelCase__ ( self : int ):
        """Source and target may be truncated to different max lengths."""
        lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
        lowercase_ : List[str] = self.tokenizer(
            text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
        lowercase_ : Dict = targets["input_ids"]
        lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )

    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        """Translation-input builder must set forced BOS to the target code."""
        lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(a ) , {
                # A, test, EOS, en_XX
                "input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 5_0_0_0_1,
            } , )
| 640
| 1
|
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _UpperCAmelCase ( nn.Module ):
    """Two parallel Transformer2D blocks whose residual outputs are blended.

    Bug fixed: both ``__init__`` and the forward method declared every
    parameter as ``a`` — duplicate argument names are a SyntaxError in
    Python — so the parameters now carry distinct names matching how each
    value is consumed (e.g. the keyword names forwarded to
    ``TransformeraDModel``).
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        # Two identically-configured transformers, one per conditioning stream.
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [7_7, 2_5_7]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def lowerCAmelCase__(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Encode each conditioning slice with its transformer and blend the results.

        Returns a ``TransformeraDModelOutput`` (or a 1-tuple when
        ``return_dict`` is False) whose sample has the shape of ``hidden_states``.
        """
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            # return_dict=False so the first tuple element is the sample tensor.
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual each transformer adds on top of the input.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        # Blend the two residuals and re-add the input (residual connection).
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
| 640
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure for the EnCodec model package.
# NOTE(review): the structure dict is assigned to ``UpperCamelCase__`` but
# ``_LazyModule`` below is called with ``_import_structure`` — undefined here;
# the dict (and the torch-only list) were presumably ``_import_structure``
# entries before identifier mangling.
UpperCamelCase__ = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
# Register the torch-only modeling objects when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]
if TYPE_CHECKING:
    # For static type checkers: perform the real (eager) imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy loader.
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 1
|
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Convert a T5X (t5 / longt5) checkpoint into a Flax seq2seq model and save it.

    NOTE(review): this block was mangled by an automated rename pass and is not
    runnable as written:
      * the three parameters all share the name ``_UpperCamelCase`` — a SyntaxError;
        they were presumably (t5x_checkpoint_path, config_name, flax_dump_folder_path).
      * ``SCREAMING_SNAKE_CASE_`` used throughout is never defined.
      * every local is assigned to ``lowercase_`` while later lines read descriptive
        names (``config``, ``tax_model``, ``tax_attention_key`` …), so those reads
        are NameErrors.
      * the ``model_type`` checks below use ``if``/``if``/``elif``/``else``; for a
        plain ``"t5"`` config the second chain still falls into ``else`` and raises —
        the first ``if`` was presumably meant to start an ``if``/``elif`` chain.
    The structure is kept verbatim below because it still documents the intended
    T5X -> Flax weight mapping layer by layer.
    """
    lowercase_ : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
    lowercase_ : int = FlaxAutoModelForSeqaSeqLM.from_config(config=SCREAMING_SNAKE_CASE_ )
    lowercase_ : Optional[Any] = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ )
    # Newer checkpoints split the MLP input projection into wi_0 / wi_1 (gated act).
    lowercase_ : int = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    if config.model_type == "t5":
        lowercase_ : List[Any] = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        lowercase_ : int = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowercase_ : Optional[Any] = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]." )
    # Encoder
    for layer_index in range(config.num_layers ):
        lowercase_ : Tuple = F"""layers_{str(SCREAMING_SNAKE_CASE_ )}"""
        # Self-Attention
        lowercase_ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        lowercase_ : Dict = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        lowercase_ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        lowercase_ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowercase_ : int = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        lowercase_ : Any = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            lowercase_ : Union[str, Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            lowercase_ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            lowercase_ : List[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        lowercase_ : Union[str, Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        lowercase_ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        lowercase_ : Dict = flax_model.params["encoder"]["block"][str(SCREAMING_SNAKE_CASE_ )]["layer"]
        lowercase_ : List[str] = tax_attention_key
        lowercase_ : Tuple = tax_attention_out
        lowercase_ : List[Any] = tax_attention_query
        lowercase_ : int = tax_attention_value
        lowercase_ : List[Any] = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            lowercase_ : str = tax_global_layer_norm
        if split_mlp_wi:
            lowercase_ : Any = tax_mlp_wi_a
            lowercase_ : Optional[Any] = tax_mlp_wi_a
        else:
            lowercase_ : Optional[Any] = tax_mlp_wi
        lowercase_ : str = tax_mlp_wo
        lowercase_ : Dict = tax_mlp_layer_norm
        lowercase_ : Optional[Any] = flax_model_encoder_layer_block
    # Only for layer 0:
    lowercase_ : Optional[Any] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    lowercase_ : int = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        lowercase_ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        lowercase_ : Tuple = tax_encoder_global_rel_embedding
    # Assigning
    lowercase_ : List[str] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    lowercase_ : Optional[int] = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        lowercase_ : int = F"""layers_{str(SCREAMING_SNAKE_CASE_ )}"""
        # Self-Attention
        lowercase_ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        lowercase_ : int = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        lowercase_ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        lowercase_ : int = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        lowercase_ : Dict = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        lowercase_ : Optional[int] = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        lowercase_ : Any = tax_enc_dec_attention_module["key"]["kernel"]
        lowercase_ : List[str] = tax_enc_dec_attention_module["out"]["kernel"]
        lowercase_ : Any = tax_enc_dec_attention_module["query"]["kernel"]
        lowercase_ : Any = tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        lowercase_ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            lowercase_ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            lowercase_ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            lowercase_ : Dict = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        lowercase_ : Optional[int] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        lowercase_ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        lowercase_ : Dict = flax_model.params["decoder"]["block"][str(SCREAMING_SNAKE_CASE_ )]["layer"]
        lowercase_ : str = tax_attention_key
        lowercase_ : Union[str, Any] = tax_attention_out
        lowercase_ : List[Any] = tax_attention_query
        lowercase_ : str = tax_attention_value
        lowercase_ : List[Any] = tax_pre_attention_layer_norm
        lowercase_ : int = tax_enc_dec_attention_key
        lowercase_ : List[Any] = tax_enc_dec_attention_out
        lowercase_ : List[str] = tax_enc_dec_attention_query
        lowercase_ : str = tax_enc_dec_attention_value
        lowercase_ : Tuple = tax_cross_layer_norm
        if split_mlp_wi:
            lowercase_ : int = tax_mlp_wi_a
            lowercase_ : str = tax_mlp_wi_a
        else:
            lowercase_ : List[str] = tax_mlp_wi
        lowercase_ : List[Any] = tax_mlp_wo
        # NOTE(review): `txa_mlp_layer_norm` below is yet another spelling drift
        # (vs `tax_mlp_layer_norm` in the encoder loop) — also undefined.
        lowercase_ : List[str] = txa_mlp_layer_norm
        lowercase_ : Union[str, Any] = flax_model_decoder_layer_block
    # Decoder Normalization
    lowercase_ : Tuple = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase_ : int = txa_decoder_norm
    # Only for layer 0:
    lowercase_ : str = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    lowercase_ : Tuple = tax_decoder_rel_embedding
    # Token Embeddings
    lowercase_ : int = tax_model["target"]["token_embedder"]["embedding"]
    lowercase_ : Optional[int] = txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        lowercase_ : Optional[Any] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
    print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
    # CLI entry point: convert a T5X checkpoint to a Flax dump on disk.
    # NOTE(review): the original bound the parser and parsed args to
    # `UpperCamelCase__` while reading the undefined names `parser`/`args`,
    # read the non-existent attribute `args.tax_checkpoint_path` (argparse
    # exposes `--t5x_checkpoint_path` as `t5x_checkpoint_path`), and called the
    # undefined `convert_tax_checkpoint_to_flax` instead of the function
    # defined above.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    args = parser.parse_args()
    __SCREAMING_SNAKE_CASE(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 700
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
    # Names of the supported learning-rate schedules; the values below are the
    # string identifiers accepted by the `get_scheduler`-style factory at the
    # bottom of this module.
    # NOTE(review): the base class `snake_case` is not defined in this file
    # (presumably an Enum subclass before the rename pass), and all seven
    # members were renamed to the same attribute `__lowerCamelCase`, so only the
    # last binding ('piecewise_constant') survives at class-creation time.
    __lowerCamelCase: Tuple = 'linear'
    __lowerCamelCase: Any = 'cosine'
    __lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
    __lowerCamelCase: Tuple = 'polynomial'
    __lowerCamelCase: int = 'constant'
    __lowerCamelCase: Optional[Any] = 'constant_with_warmup'
    __lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Dispatch table: scheduler-type enum member -> factory function.
# NOTE(review): none of the names referenced here (`SchedulerType`,
# `get_linear_schedule_with_warmup`, ...) are defined in this file — the rename
# pass replaced the class with `_UpperCAmelCase` and every factory with
# `__SCREAMING_SNAKE_CASE`, so evaluating this dict raises NameError at import.
# The mapping still documents which factory each schedule name was meant to use.
UpperCamelCase__ = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    """Unified scheduler factory: build a scheduler from its string/enum name.

    Args:
        name: scheduler name (string accepted by ``SchedulerType``) or enum member.
        optimizer: wrapped ``torch.optim.Optimizer``.
        step_rules: rule string, used only by the piecewise-constant schedule.
        num_warmup_steps: required by every schedule except constant/piecewise.
        num_training_steps: required by the decaying schedules.
        num_cycles: forwarded to the cosine-with-restarts schedule.
        power: forwarded to the polynomial schedule.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: when a required argument for the chosen schedule is missing.

    NOTE(review): the original signature repeated `_UpperCamelCase` eight times
    (a SyntaxError); parameter names were reconstructed from the body.
    `SchedulerType` and `TYPE_TO_SCHEDULER_FUNCTION` are presumably the enum and
    dispatch dict defined earlier in this module, but the rename pass left both
    names undefined at module level — confirm before shipping.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 640
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import bookkeeping: submodule name -> list of public names it exports.
# NOTE(review): the original bound this dict to `UpperCamelCase__`, rebound that
# name to a plain list below, and passed the undefined `_import_structure` to
# `_LazyModule` (NameError at import). Restored the conventional pattern.
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}

# The modeling submodule is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys

    # Install a lazy proxy so heavy submodules are imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 200 ):
"""simple docstring"""
lowercase_ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200]
lowercase_ : str = [0] * (pence + 1)
lowercase_ : Dict = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
    # Sanity check against the known Project Euler 31 answer for 200 pence.
    # NOTE(review): the original asserted on the undefined name `solution`; the
    # function above is bound to `__SCREAMING_SNAKE_CASE` in this file.
    assert __SCREAMING_SNAKE_CASE(200) == 73682
| 640
| 0
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
UpperCamelCase__ = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def __SCREAMING_SNAKE_CASE ( pkg , hint=None ):
    """Check that the installed version of `pkg` satisfies its pin in `deps`.

    Args:
        pkg: package name, must be a key of the `deps` table above.
        hint: optional extra message forwarded to `require_version` on failure.

    NOTE(review): the original signature duplicated `_UpperCamelCase` (a
    SyntaxError) and passed the undefined `_SCREAMING_SNAKE_CASE` as the hint.
    """
    require_version(deps[pkg], hint)
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
    # Reader that loads a JSON/JSON-Lines source into a `datasets.Dataset`
    # (or a streaming dataset) via the packaged `Json` builder.
    # NOTE(review): this block was mangled by a rename pass — the base class
    # `snake_case` is undefined (presumably AbstractDatasetReader, imported
    # above), every parameter is named `a` (duplicate parameter names are a
    # SyntaxError), and instance attributes are assigned to the throwaway
    # `lowercase_` while later reads use `self.split`/`self.builder` etc.
    def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
        '''Store reader configuration and construct the underlying `Json` builder.'''
        super().__init__(
            a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
        lowercase_ : str = field
        # Normalize a single path into a {split: path} mapping.
        lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
        lowercase_ : Any = Json(
            cache_dir=a , data_files=a , features=a , field=a , **a , )
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Materialize and return the dataset (streaming or map-style).'''
        if self.streaming:
            lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            # Download/prepare knobs are all left at their defaults (None).
            lowercase_ : Dict = None
            lowercase_ : Optional[int] = None
            lowercase_ : str = None
            lowercase_ : str = None
            self.builder.download_and_prepare(
                download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
            lowercase_ : int = self.builder.as_dataset(
                split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
        # NOTE(review): `dataset` is never bound under this rename — the builder
        # results above all went to `lowercase_`.
        return dataset
class _UpperCAmelCase :
    # Writer that serializes a `datasets.Dataset` to JSON-Lines/JSON, either to
    # a path (optionally compressed) or to an open binary buffer, batch by
    # batch and optionally in parallel with multiprocessing.
    # NOTE(review): mangled by a rename pass — parameters are all named `a`
    # (duplicate names: SyntaxError) and locals go to `lowercase_` while later
    # reads use the intended names (`orient`, `lines`, `written`, ...).
    def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
        '''Store the dataset, destination, batch size, process count and pandas kwargs.'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        lowercase_ : Dict = dataset
        lowercase_ : Optional[int] = path_or_buf
        lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        lowercase_ : Optional[Any] = num_proc
        # Output is always UTF-8 encoded.
        lowercase_ : List[Any] = "utf-8"
        lowercase_ : List[str] = to_json_kwargs
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Write the whole dataset; returns the number of bytes written.'''
        # Pop writer-level options out of the pandas `to_json` kwargs.
        lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
        lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
        # JSON-Lines by default when orient="records".
        lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
        lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
        lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            # Destination is a filesystem path: let fsspec handle compression.
            with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
                lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
        else:
            # Destination is an already-open buffer: compression unsupported.
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead." )
            lowercase_ : Dict = self._write(
                file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
        return written
    def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
        '''Serialize one batch of rows to encoded JSON bytes (multiprocessing unit of work).'''
        # NOTE(review): this 5-way unpack originally named
        # (offset, orient, lines, index, to_json_kwargs); the rename collapsed
        # every target to `lowercase_`, so the reads below are NameErrors.
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
        lowercase_ : Optional[int] = query_table(
            table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
        lowercase_ : Dict = batch.to_pandas().to_json(
            path_or_buf=a , orient=a , lines=a , index=a , **a )
        if not json_str.endswith("\n" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
        '''Stream all batches into `file_obj`, serially or via a process pool.'''
        lowercase_ : Union[str, Any] = 0
        if self.num_proc is None or self.num_proc == 1:
            # Serial path: iterate offsets and write each encoded batch.
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(a )
        else:
            # Parallel path: imap preserves batch order while workers serialize.
            lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                    written += file_obj.write(a )
        return written
| 640
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
    # Abstract base for CLI subcommands: subclasses register their argparse
    # subparser (static hook) and implement `run` for execution.
    # NOTE(review): the base class `_SCREAMING_SNAKE_CASE` is undefined in this
    # file (presumably `ABC`, imported above), and both abstract methods were
    # renamed to the same identifier `lowerCAmelCase__`, so the second
    # definition shadows the first at class-creation time.
    @staticmethod
    @abstractmethod
    def lowerCAmelCase__ ( a : Optional[int] ):
        '''Register this command on the given argparse parser (abstract).'''
        raise NotImplementedError()
    @abstractmethod
    def lowerCAmelCase__ ( self : str ):
        '''Execute the command (abstract).'''
        raise NotImplementedError()
| 703
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase_ : List[str] = update_area_of_max_square(_UpperCamelCase , col + 1 )
lowercase_ : List[Any] = update_area_of_max_square(row + 1 , col + 1 )
lowercase_ : Tuple = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Optional[int] = 1 + min([right, diagonal, down] )
lowercase_ : Any = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
lowercase_ : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase_ : Dict = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
lowercase_ : str = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
lowercase_ : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Tuple = 1 + min([right, diagonal, down] )
lowercase_ : int = max(largest_square_area[0] , _UpperCamelCase )
lowercase_ : Dict = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase_ : Any = [0]
lowercase_ : Optional[int] = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase_ : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Tuple = dp_array[row][col + 1]
lowercase_ : List[str] = dp_array[row + 1][col + 1]
lowercase_ : List[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase_ : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = max(dp_array[row][col] , _UpperCamelCase )
else:
lowercase_ : int = 0
return largest_square_area
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = [0] * (cols + 1)
lowercase_ : Union[str, Any] = [0] * (cols + 1)
lowercase_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Dict = current_row[col + 1]
lowercase_ : List[Any] = next_row[col + 1]
lowercase_ : Tuple = next_row[col]
if mat[row][col] == 1:
lowercase_ : Dict = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : int = max(current_row[col] , _UpperCamelCase )
else:
lowercase_ : Tuple = 0
lowercase_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original printed the undefined name
    # `largest_square_area_in_matrix_bottom_up`; in this file every
    # implementation is bound to `__SCREAMING_SNAKE_CASE`, so the last binding
    # (the space-optimized bottom-up variant) is exercised here.
    print(__SCREAMING_SNAKE_CASE(2, 2, [[1, 1], [1, 1]]))
| 640
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import bookkeeping: submodule name -> list of public names it exports.
# NOTE(review): the original bound this dict to `UpperCamelCase__`, rebound that
# name to a plain list below, and passed the undefined `_import_structure` to
# `_LazyModule` (NameError at import). Restored the conventional pattern.
_import_structure = {
    'configuration_informer': [
        'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InformerConfig',
    ],
}

# The modeling submodule is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_informer'] = [
        'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InformerForPrediction',
        'InformerModel',
        'InformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys

    # Install a lazy proxy so heavy submodules are imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# argparse-style namespace used to drive `TestCommand` in the integration test
# below; the trailing `defaults` switch every flag off.
# NOTE(review): the tuple type is bound to `UpperCamelCase__`, but the test body
# below instantiates `_TestCommandArgs`, which this rename pass left undefined.
UpperCamelCase__ = namedtuple(
    '_TestCommandArgs',
    [
        'dataset',
        'name',
        'cache_dir',
        'data_dir',
        'all_configs',
        'save_infos',
        'ignore_verifications',
        'force_redownload',
        'clear_cache',
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Integration test: run `TestCommand --save_infos` on a dataset script and
    compare the regenerated README dataset infos against expected values.

    NOTE(review): mangled by a rename pass and not runnable as written —
    `_TestCommandArgs` (bound above as `UpperCamelCase__`), `is_apercent_close`,
    `result` and `expected` (their unpack targets became `lowercase_`) are all
    undefined, and `_UpperCamelCase` is used both as the dataset script path and
    as the directory the README is read from; originally these were presumably
    separate fixture arguments — confirm against the upstream test.
    """
    lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
    lowercase_ : int = TestCommand(*_UpperCamelCase )
    test_command.run()
    # The command should have written dataset infos into README.md.
    lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
    assert os.path.exists(_UpperCamelCase )
    lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
    # Expected infos for the default config of the fixture dataset.
    lowercase_ : Optional[int] = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string" ) ),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
                        "langs": Sequence(Value("string" ) ),
                        "spans": Sequence(Value("string" ) ),
                    } ) , splits=[
                    {
                        "name": "train",
                        "num_bytes": 235_1563,
                        "num_examples": 1_0000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 23_8418,
                        "num_examples": 1000,
                    },
                ] , download_size=394_0680 , dataset_size=258_9981 , )
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
        if key == "num_bytes":
            # Byte counts are compared with 1% tolerance (serialization jitter).
            assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
        elif key == "splits":
            assert list(_UpperCamelCase ) == list(_UpperCamelCase )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            # NOTE(review): bare comparison — evaluates and discards the result,
            # so this branch asserts nothing.
            result == expected
| 640
| 0
|
'''simple docstring'''
from typing import List
import numpy as np
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = {key: len(_A ) for key, value in gen_kwargs.items() if isinstance(_A , _A )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
lowercase_ : Optional[Any] = max(lists_lengths.values() , default=0 )
return max(1 , _A )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for group_idx in range(_A ):
lowercase_ : str = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowercase_ : List[str] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowercase_ : Tuple = range(_A , start + num_shards_to_add )
shards_indices_per_group.append(_A )
return shards_indices_per_group
def __SCREAMING_SNAKE_CASE ( gen_kwargs , max_num_jobs ):
    """Split sharded `gen_kwargs` into at most `max_num_jobs` per-job kwarg dicts.

    Lists (the shardable values) are sliced per job group; all other values are
    copied through unchanged.

    Args:
        gen_kwargs: keyword arguments passed to a dataset generator.
        max_num_jobs: maximum number of job groups.

    Returns:
        List of kwarg dicts, one per job group.
    """
    # NOTE(review): duplicate `_UpperCamelCase` parameters (SyntaxError) and the
    # `_A` placeholders were reconstructed. The helpers called below
    # (`_number_of_shards_in_gen_kwargs`, `_distribute_shards`) are the two
    # functions defined above, which this file's rename pass left bound to
    # `__SCREAMING_SNAKE_CASE` — restore their names alongside this fix.
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        # Nothing to split: one job gets a shallow copy of everything.
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , _A )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[Any] = {len(_A ) for value in gen_kwargs.values() if isinstance(_A , _A )}
lowercase_ : Any = {}
for size in list_sizes:
lowercase_ : str = list(range(_A ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowercase_ : Tuple = dict(_A )
for key, value in shuffled_kwargs.items():
if isinstance(_A , _A ):
lowercase_ : Tuple = [value[i] for i in indices_per_size[len(_A )]]
return shuffled_kwargs
| 705
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( outputs ):
    """Map each tool output to its modality name (``"text"``/``"image"``/``"audio"``).

    Raises:
        ValueError: if an output is none of the supported plain or agent types.
    """
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(F"""Invalid output: {output}""")
    return output_types
@is_tool_test
class _UpperCAmelCase :
    # Mixin of sanity checks for agent tools; the concrete test case is expected
    # to provide ``self.tool`` plus unittest's assert helpers.
    # NOTE(review): all five methods share the name ``lowerCAmelCase__``, so only the
    # last definition survives on the class; several names below (``inputs``,
    # ``outputs``, ``authorized_types``, ``a``, ``_inputs``) are never defined in
    # this scope while results are bound to throwaway ``lowercase_`` variables.
    # This looks like damage from an automated rename and needs restoring.
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Every declared input/output modality must be one of the authorized types.'''
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )
        lowercase_ : Optional[Any] = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , a ):
                # an input slot may itself accept a list of modalities
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        lowercase_ : Any = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def lowerCAmelCase__ ( self : Any ):
        '''Calling the tool on dummy inputs must produce outputs of the declared types.'''
        lowercase_ : List[str] = create_inputs(self.tool.inputs )
        lowercase_ : List[str] = self.tool(*a )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            lowercase_ : Union[str, Any] = [outputs]
        self.assertListEqual(output_types(a ) , self.tool.outputs )
    def lowerCAmelCase__ ( self : List[str] ):
        '''The tool must expose a description and a default checkpoint.'''
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
    def lowerCAmelCase__ ( self : Any ):
        '''The number and agent-types of returned outputs must match the declaration.'''
        lowercase_ : Any = create_inputs(self.tool.inputs )
        lowercase_ : str = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
        for output, output_type in zip(a , self.tool.outputs ):
            lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(a , a ) )
    def lowerCAmelCase__ ( self : Tuple ):
        '''The tool must also accept inputs already wrapped in their agent types.'''
        lowercase_ : Dict = create_inputs(self.tool.inputs )
        lowercase_ : Optional[int] = []
        for _input, input_type in zip(a , self.tool.inputs ):
            if isinstance(a , a ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        lowercase_ : Any = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : Any = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 0
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict" , [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
        SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
        SplitDict({"train": SplitInfo()} ),
    ] , )
def __SCREAMING_SNAKE_CASE ( split_dict ):
    """Round-trip a ``SplitDict`` through its YAML-list form and check equality."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    # one YAML entry per split
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def __SCREAMING_SNAKE_CASE ( split_info ):
    """The deprecated ``dataset_name`` field must survive ``asdict`` untouched."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 706
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class _UpperCAmelCase ( snake_case ):
    """Kandinsky 2.1 text-to-image decoder pipeline: multilingual CLIP text encoder +
    XLM-Roberta tokenizer + conditioned UNet + DDIM/DDPM scheduler + MoVQ image decoder.

    NOTE(review): method parameter lists below repeat the name ``a`` (a SyntaxError
    in Python) and results are bound to throwaway ``lowercase_`` names while later
    statements read the intended identifiers (``latents``, ``prompt_embeds``, ...).
    This looks like damage from an automated rename; the original names need restoring.
    """
    def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
        '''Register all sub-models and derive the MoVQ latent scale factor.'''
        super().__init__()
        self.register_modules(
            text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
        # factor between pixel resolution and MoVQ latent resolution: 2^(n_blocks - 1)
        lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
        '''Draw initial latents (or validate user-provided ones) and scale by the scheduler's init sigma.'''
        if latents is None:
            lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            lowercase_ : Optional[int] = latents.to(a )
        lowercase_ : str = latents * scheduler.init_noise_sigma
        return latents
    def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
        '''Tokenize and encode the prompt (and, under classifier-free guidance, the negative prompt).'''
        lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
        # get prompt text embeddings
        lowercase_ : Any = self.tokenizer(
            a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
        lowercase_ : Union[str, Any] = text_inputs.input_ids
        lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
        # warn the user about anything the 77-token window truncated away
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
            lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
        lowercase_ : List[str] = text_input_ids.to(a )
        lowercase_ : int = text_inputs.attention_mask.to(a )
        lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
            input_ids=a , attention_mask=a )
        # repeat the embeddings once per requested image
        lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
        lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
        lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : List[str]
            if negative_prompt is None:
                lowercase_ : int = [""] * batch_size
            elif type(a ) is not type(a ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
                    f""" {type(a )}.""" )
            elif isinstance(a , a ):
                lowercase_ : Tuple = [negative_prompt]
            elif batch_size != len(a ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`." )
            else:
                lowercase_ : Dict = negative_prompt
            lowercase_ : str = self.tokenizer(
                a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
            lowercase_ : List[Any] = uncond_input.input_ids.to(a )
            lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
            lowercase_ , lowercase_ : int = self.text_encoder(
                input_ids=a , attention_mask=a )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase_ : List[str] = negative_prompt_embeds.shape[1]
            lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
            lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
            lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
            lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
            lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , a , -1 )
            lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
            lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
        '''Offload all sub-models to CPU with accelerate's ``cpu_offload`` to reduce GPU memory.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        lowercase_ : str = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(a , a )
    def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
        '''Model CPU offload: each sub-model is moved to GPU only while it runs (accelerate >= 0.17).'''
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase_ : List[str] = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
        if self.safety_checker is not None:
            lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
        # We'll offload the last model manually.
        lowercase_ : Dict = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__ ( self : Tuple ):
        '''Device the UNet actually executes on (honours accelerate hooks), else the pipeline device.'''
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(a )
    def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
        '''Run the full denoising loop for the prompt and decode the final latents with MoVQ.'''
        if isinstance(a , a ):
            lowercase_ : List[str] = 1
        elif isinstance(a , a ):
            lowercase_ : int = len(a )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
        lowercase_ : Tuple = self._execution_device
        lowercase_ : Dict = batch_size * num_images_per_prompt
        # classifier-free guidance is active for guidance_scale > 1
        lowercase_ : Dict = guidance_scale > 1.0
        lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
            a , a , a , a , a )
        if isinstance(a , a ):
            lowercase_ : Optional[int] = torch.cat(a , dim=0 )
        if isinstance(a , a ):
            lowercase_ : int = torch.cat(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=a )
        self.scheduler.set_timesteps(a , device=a )
        lowercase_ : List[str] = self.scheduler.timesteps
        lowercase_ : str = self.unet.config.in_channels
        lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
        # create initial latent
        lowercase_ : str = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
        for i, t in enumerate(self.progress_bar(a ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            lowercase_ : Optional[int] = self.unet(
                sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
            if do_classifier_free_guidance:
                # split off the learned-variance channels; guidance applies to the noise part only
                lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
                lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
                lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ : Tuple = self.scheduler.step(
                a , a , a , generator=a , ).prev_sample
        # post-processing
        lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # map [-1, 1] -> [0, 1] and convert to channel-last numpy
            lowercase_ : List[Any] = image * 0.5 + 0.5
            lowercase_ : Optional[int] = image.clamp(0 , 1 )
            lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase_ : List[str] = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(_UpperCamelCase , 2 ) - pow(_UpperCamelCase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(_UpperCamelCase , 2 ) - pow(_UpperCamelCase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(_UpperCamelCase , 2 ) + pow(_UpperCamelCase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __SCREAMING_SNAKE_CASE ( pil_image , w=512 , h=512 ):
    """Resize ``pil_image`` to (w, h) and convert it to a float tensor in [-1, 1]
    with shape (1, 3, h, w), as expected by the MoVQ encoder."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    # np.floataa is not a NumPy dtype; float32 is the dtype the pipeline runs in
    arr = arr.astype(np.float32) / 127.5 - 1  # uint8 [0, 255] -> float [-1, 1]
    arr = np.transpose(arr, [2, 0, 1])  # HWC -> CHW
    image = torch.from_numpy(arr).unsqueeze(0)  # add batch dimension
    return image
class _UpperCAmelCase ( snake_case ):
    """Kandinsky 2.2 image-to-image decoder pipeline: conditioned UNet + DDPM
    scheduler + MoVQ image encoder/decoder.

    NOTE(review): method parameter lists below repeat the name ``a`` (a SyntaxError
    in Python) and results are bound to throwaway ``lowercase_`` names while later
    statements read the intended identifiers (``latents``, ``timesteps``, ...).
    This looks like damage from an automated rename; the original names need restoring.
    """
    def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
        '''Register sub-models and derive the MoVQ latent scale factor.'''
        super().__init__()
        self.register_modules(
            unet=a , scheduler=a , movq=a , )
        # factor between pixel resolution and MoVQ latent resolution: 2^(n_blocks - 1)
        lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
        '''Trim the timestep schedule for img2img: start ``strength`` of the way into denoising.'''
        lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
        lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
        lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
        '''Encode the init image to MoVQ latents and add scheduler noise for the start timestep.'''
        if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
        lowercase_ : str = image.to(device=a , dtype=a )
        lowercase_ : Any = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # 4 channels means the input is already in latent space
            lowercase_ : str = image
        else:
            if isinstance(a , a ) and len(a ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(a , a ):
                # one generator per batch element: encode sample-by-sample
                lowercase_ : str = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
                ]
                lowercase_ : List[Any] = torch.cat(a , dim=0 )
            else:
                lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
            lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
        lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
        lowercase_ : List[Any] = init_latents.shape
        lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
        # get latents
        lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
        lowercase_ : Tuple = init_latents
        return latents
    def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
        '''Offload the sub-models to CPU with accelerate's ``cpu_offload`` to reduce GPU memory.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
        lowercase_ : Optional[Any] = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(a , a )
    def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
        '''Model CPU offload: each sub-model is moved to GPU only while it runs (accelerate >= 0.17).'''
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase_ : Optional[int] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
        # We'll offload the last model manually.
        lowercase_ : List[Any] = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__ ( self : int ):
        '''Device the UNet actually executes on (honours accelerate hooks), else the pipeline device.'''
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(a )
    def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
        '''Img2img loop: noise the encoded init image, denoise from t_start, decode with MoVQ.'''
        lowercase_ : Optional[int] = self._execution_device
        # classifier-free guidance is active for guidance_scale > 1
        lowercase_ : Dict = guidance_scale > 1.0
        if isinstance(a , a ):
            lowercase_ : Dict = torch.cat(a , dim=0 )
        lowercase_ : Dict = image_embeds.shape[0]
        if isinstance(a , a ):
            lowercase_ : str = torch.cat(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [image]
        if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support  PIL image and pytorch tensor""" )
        lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
        lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
        lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
        lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
        self.scheduler.set_timesteps(a , device=a )
        lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
        lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
        lowercase_ : Tuple = self.prepare_latents(
            a , a , a , a , image_embeds.dtype , a , a )
        for i, t in enumerate(self.progress_bar(a ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase_ : int = {"image_embeds": image_embeds}
            lowercase_ : Optional[int] = self.unet(
                sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
            if do_classifier_free_guidance:
                # split off the learned-variance channels; guidance applies to the noise part only
                lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
                lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
                lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ : Dict = self.scheduler.step(
                a , a , a , generator=a , )[0]
        # post-processing
        lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # map [-1, 1] -> [0, 1] and convert to channel-last numpy
            lowercase_ : Tuple = image * 0.5 + 0.5
            lowercase_ : Any = image.clamp(0 , 1 )
            lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase_ : Tuple = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class _UpperCAmelCase ( __A ):
    """Configuration for the LXMERT model: a language encoder, a vision (object
    relationship) encoder and a cross-modality encoder, plus flags for its
    pre-training tasks.

    The original code declared both class attributes under the same name (the
    second silently shadowed the first) and gave every ``__init__`` parameter
    the name ``a`` (a SyntaxError); canonical names are restored here.
    """

    model_type = 'lxmert'
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # task / label-space sizes
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        # per-modality encoder depths
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        # pre-training task switches
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # layer counts keyed by encoder, as downstream code expects
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
| 708
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def model_input_names(self):
    """Union of tokenizer and image-processor input names, de-duplicated in order.

    ``dict.fromkeys`` preserves first-seen order while dropping duplicates.
    """
    tokenizer_input_names = self.tokenizer.model_input_names
    image_processor_input_names = self.image_processor.model_input_names
    return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 640
| 0
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 709
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()  # make runs deterministic so the hard-coded expected slices in the tests below are reproducible
class _UpperCAmelCase(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the Kandinsky 2.2 prior pipeline built from tiny dummy sub-models.

    Reconstructed from the obfuscated original: all class attributes were collapsed to
    ``__lowerCamelCase``, all methods to ``lowerCAmelCase__``, and locals to
    ``lowercase_``; the real names are recovered from the usage sites
    (``self.dummy_prior``, ``self.time_input_dim``, ...). The base class ``snake_case``
    did not exist; ``PipelineTesterMixin`` is the mixin imported above.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        # Shared hidden size for the tiny text/vision encoders and the prior.
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        """Build the full set of tiny components the pipeline constructor expects."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # torch.Generator(device="mps") is unsupported, so seed globally on MPS.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        # Same call without the output dataclass; first tuple element is the embeds.
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        # Exact comparison is only reliable on CPU.
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 640
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


class _UpperCAmelCase(PreTrainedTokenizer):
    """SentencePiece-based NLLB tokenizer (reconstructed from the obfuscated source).

    The original collapsed every parameter to ``a``, every method name to
    ``lowerCAmelCase__`` and every local to ``lowercase_``; real names are restored
    from the usage sites. The base class ``snake_case`` did not exist —
    ``PreTrainedTokenizer`` is the class imported above.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)

        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; serialize the model proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens (language code / eos), 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """NLLB does not use token type ids; return an all-zeros mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs and the forced BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (pieces) back to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No on-disk model file; dump the serialized proto instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """In legacy mode: no prefix, suffix=[eos, src_lang_code]; otherwise prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """In legacy mode: no prefix, suffix=[eos, tgt_lang_code]; otherwise prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 710
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
UpperCamelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
UpperCamelCase__ = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 640
| 0
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class _UpperCAmelCase(PretrainedConfig):
    """MaskFormer configuration (reconstructed from the obfuscated source).

    Holds a nested backbone config (Swin/ResNet family) and a transformer decoder
    config (DETR). The obfuscated original used ``__A``/``lowercase_`` placeholders
    and an undefined base class ``snake_case``; ``PretrainedConfig`` is the class
    imported above.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {",".join(self.backbones_supported)}"""
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {",".join(self.decoders_supported)}"""
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Alternate constructor from already-instantiated sub-configs."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for GPT-NeoX-Japanese models (reconstructed from the obfuscated source).

    Parameter names are restored from the attribute assignments in the body; the
    obfuscated signature declared every argument as ``a`` (a SyntaxError) and the
    base class ``snake_case`` did not exist — ``PretrainedConfig`` is imported above.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # The FFN inner size is hidden_size * intermediate_multiple_size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 640
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """WordPiece tokenization tests for LXMERT (reconstructed from the obfuscated source).

    Class attributes, method names (``setUp``, ``get_input_output_texts``, ...) and
    locals are restored from the mixin's conventions and the usage sites; the base
    class placeholder ``UpperCamelCase_`` did not exist — ``TokenizerTesterMixin``
    is imported above.
    """

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Tiny WordPiece vocab; the token index doubles as its id.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 712
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def connect(graph, a, b, edge):
    """Add an undirected edge of weight *edge* between vertices ``a`` and ``b``.

    ``graph`` is a list of Vertex objects; ``a`` and ``b`` are 1-based indices
    into it (hence the ``- 1`` adjustments).
    """
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph, root):
    """Prim's minimum-spanning-tree algorithm, list-based variant (O(V^2)).

    Args:
        graph: list of Vertex objects (see the Vertex class above).
        root: the Vertex to grow the tree from.

    Returns:
        List of ``(child, parent)`` pairs as 1-based integer ids.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        # Extract the vertex with the cheapest connecting edge.
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]

    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph, root):
    """Prim's minimum-spanning-tree algorithm using a binary heap.

    Yields ``(child, parent)`` pairs as 1-based integer ids.

    NOTE(review): re-heapifying after each key decrease keeps the heap valid
    (heapq has no decrease-key); this mirrors the original structure.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    """Placeholder for the Prim's-algorithm demo; the upstream file exercised the
    functions above via doctests here. Returns None."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 640
| 0
|
import numpy
# List of input, output pairs
UpperCamelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCamelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
UpperCamelCase__ = [2, 4, 1, 5]
UpperCamelCase__ = len(train_data)
UpperCamelCase__ = 0.0_09
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase="train" ):
"""simple docstring"""
return calculate_hypothesis_value(__UpperCAmelCase , __UpperCAmelCase ) - output(
__UpperCAmelCase , __UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = 0
for i in range(len(__UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=m ):
"""simple docstring"""
lowercase_ : Optional[Any] = 0
for i in range(__UpperCAmelCase ):
if index == -1:
summation_value += _error(__UpperCAmelCase )
else:
summation_value += _error(__UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def __SCREAMING_SNAKE_CASE(index):
    """
    Average cost derivative for parameter ``index`` over all ``m`` training examples.

    Fix: the call passed the undefined ``__UpperCAmelCase`` twice; the intended
    arguments are the parameter index and the example count ``m``.
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


# Restore the name ``run_gradient_descent`` calls.
get_cost_derivative = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE():
    """
    Run batch gradient descent, updating the module-level ``parameter_vector``
    in place until successive iterates are within tolerance.

    Fix: the loop bound and the ``numpy.allclose`` arguments referenced the
    undefined ``__UpperCAmelCase``; they must use ``parameter_vector``,
    ``temp_parameter_vector`` and the two error limits.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


# Restore the name the __main__ guard calls.
run_gradient_descent = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE():
    """
    Print actual vs. hypothesis outputs for every test example.

    Fix: the loop bound and call arguments referenced the undefined
    ``__UpperCAmelCase``; the intended iteration is over ``test_data``.
    """
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


# Restore the name the __main__ guard calls.
test_gradient_descent = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Fit the linear hypothesis, then report predictions on the held-out set.
    # NOTE(review): `run_gradient_descent` / `test_gradient_descent` are not
    # defined under those names in this file (the defs above were mangled to
    # `__SCREAMING_SNAKE_CASE`); restore them or these calls raise NameError.
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
| 713
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE(input_list):
    """
    Sort ``input_list`` in place with odd-even (brick) sort and return it.

    Fix: the control flag was referenced as ``is_sorted`` but never bound (the
    assignments were mangled to ``lowercase_``), and the swaps assigned to
    throwaway names instead of the list slots — restored both.
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


# Restore the name the __main__ guard below calls.
odd_even_sort = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Interactive demo: read space-separated integers, sort, and print.
    print('Enter list to be sorted')
    UpperCamelCase__ = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    UpperCamelCase__ = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
    # NOTE(review): `input_list` / `sorted_list` were both mangled to
    # `UpperCamelCase__` above, so those references no longer resolve; the two
    # assignments need distinct names restored.
| 640
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCAmelCase ( snake_case ):
    """
    Configuration for a ViT-MSN model (``model_type == 'vit_msn'``).

    Fix: the original ``__init__`` declared every parameter with the same
    mangled name ``a`` (a SyntaxError) and assigned the values to locals
    instead of instance attributes.  The parameter names are recovered from
    the right-hand sides of the original assignments.
    """

    # NOTE(review): presumably ``model_type`` before the attribute name was mangled.
    __lowerCamelCase: Union[str, Any] = 'vit_msn'

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=2_2_4,
        patch_size=1_6,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        # Forward unrecognised options to the base config class.
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 714
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Return one of four hard-coded demo adjacency lists, selected by index."""
    demo_graphs = (
        # Triangle with a tail and a 4-cycle hanging off vertex 2.
        {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]},
        # Forest with an isolated vertex (8).
        {0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1]},
        # Larger graph with an isolated vertex (2).
        {0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7]},
        # Dense 5-vertex graph with no bridges.
        {0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3]},
    )
    return demo_graphs[_UpperCamelCase]
def __SCREAMING_SNAKE_CASE(graph):
    """
    Return the list of bridges of an undirected ``graph`` (adjacency-list dict
    keyed 0..n-1) using Tarjan-style low-link DFS.

    Fix: the inner ``dfs`` declared the same mangled name for all four
    parameters (a SyntaxError) and the outer locals were mangled; restored
    from the surviving body references (``low``, ``visited``, ``bridges``).
    """
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                # No back edge from the subtree reaches above `at`: (at, to) is a bridge.
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    # Cover every connected component.
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


# Export a non-mangled public name for this routine.
compute_bridges = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    import doctest

    # Run the module's embedded doctests when executed as a script.
    doctest.testmod()
| 640
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCamelCase__ = logging.get_logger(__name__)
# General docstring
UpperCamelCase__ = "RegNetConfig"
# Base docstring
UpperCamelCase__ = "facebook/regnet-y-040"
UpperCamelCase__ = [1, 1088, 7, 7]
# Image classification docstring
UpperCamelCase__ = "facebook/regnet-y-040"
UpperCamelCase__ = "tabby, tabby cat"
UpperCamelCase__ = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    RegNet conv block: zero-pad -> conv -> batch-norm -> activation.

    Fix: duplicate mangled ``a`` parameters (SyntaxError), undefined
    ``_UpperCamelCase`` references, locals that should be attributes, and the
    nonexistent ``tf.keras.layers.ZeroPaddingaD`` / ``ConvaD`` API names
    (ZeroPadding2D / Conv2D).
    """

    def __init__( self , out_channels , kernel_size=3 , stride=1 , groups=1 , activation="relu" , **kwargs ):
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding="VALID" , groups=groups , use_bias=False , name="convolution" , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call( self , hidden_state ):
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    RegNet patch embedder: validates channel count and applies the stem conv.

    Fix: undefined ``_UpperCamelCase`` references and locals that should be
    instance attributes.
    """

    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )

    def call( self , pixel_values ):
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    RegNet shortcut projection: 1x1 conv + batch-norm (no activation).

    Fix: undefined ``_UpperCamelCase`` references, locals that should be
    attributes, and the nonexistent ``tf.keras.layers.ConvaD`` name (Conv2D).
    """

    def __init__( self , out_channels , stride=2 , **kwargs ):
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name="convolution" )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )

    def call( self , inputs , training=False ):
        return self.normalization(self.convolution(inputs ) , training=training )

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    Squeeze-and-excitation layer: global-pool, bottleneck attention, rescale.

    Fix: duplicate mangled ``a`` parameters (SyntaxError), undefined
    ``_UpperCamelCase`` references, and the nonexistent
    ``tf.keras.layers.GlobalAveragePoolingaD`` name (GlobalAveragePooling2D).
    """

    def __init__( self , in_channels , reduced_channels , **kwargs ):
        super().__init__(**kwargs )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler" )
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels , kernel_size=1 , activation="relu" , name="attention.0" ),
            tf.keras.layers.Conv2D(filters=in_channels , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
        ]

    def call( self , hidden_state ):
        pooled = self.pooler(hidden_state )
        for layer_module in self.attention:
            pooled = layer_module(pooled )
        # Rescale the input feature map by the learned channel attention.
        hidden_state = hidden_state * pooled
        return hidden_state

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    RegNet "X" residual layer: 1x1 -> grouped conv -> 1x1, plus a shortcut.

    Fix: duplicate mangled parameters (SyntaxError), undefined
    ``_UpperCamelCase`` references, and locals that should be attributes.
    """

    def __init__( self , config , in_channels , out_channels , stride=1 , **kwargs ):
        super().__init__(**kwargs )
        # The shortcut must project when channels or spatial size change.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="layer.1" ),
            # Final projection has no activation; it is applied after the residual add.
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="layer.2" ),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call( self , hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    RegNet "Y" residual layer: like the X layer but with squeeze-excitation.

    Fix: duplicate mangled parameters (SyntaxError), undefined
    ``_UpperCamelCase`` references, and locals that should be attributes.
    """

    def __init__( self , config , in_channels , out_channels , stride=1 , **kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="layer.1" ),
            TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
            # Final projection has no activation; it is applied after the residual add.
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="layer.3" ),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call( self , hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    RegNet stage: one (possibly downsampling) layer followed by ``depth - 1``
    residual layers of the configured variant.

    Fix: duplicate mangled parameters (SyntaxError) and undefined
    ``_UpperCamelCase`` references.
    """

    def __init__( self , config , in_channels , out_channels , stride=2 , depth=2 , **kwargs ):
        super().__init__(**kwargs )
        # "x" variant uses grouped convolutions only; "y" adds squeeze-excitation.
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name="layers.0" ),
            *[layer(config , out_channels , out_channels , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
        ]

    def call( self , hidden_state ):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    RegNet encoder: a stack of stages, optionally collecting hidden states.

    Fix: undefined ``_UpperCamelCase`` references and the local that should be
    the ``self.stages`` attribute the original already appends to.
    """

    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=f"""stages.{i+1}""" ) )

    def call( self , hidden_state , output_hidden_states=False , return_dict=True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
@keras_serializable
class _UpperCAmelCase ( tf.keras.layers.Layer ):
    """
    Core RegNet computation graph: embedder -> encoder -> global pooler,
    returning NCHW-formatted outputs.

    Fix: undefined ``_UpperCamelCase`` references, locals that should be
    attributes, and the nonexistent ``GlobalAveragePoolingaD`` API name.
    """

    # NOTE(review): presumably ``config_class`` before the attribute name was mangled.
    __lowerCamelCase: Union[str, Any] = RegNetConfig

    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name="embedder" )
        self.encoder = TFRegNetEncoder(config , name="encoder" )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler" )

    @unpack_inputs
    def call( self , pixel_values , output_hidden_states=None , return_dict=None , training=False ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state ,
            pooler_output=pooled_output ,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )

    # Backward-compatible alias for the extraction-mangled method name.
    lowerCAmelCase__ = call
class _UpperCAmelCase ( __lowerCAmelCase ):
    """
    Scaffolding shared by RegNet models: config class, model prefix and the
    serving input signature.

    Fix: ``tf.floataa`` does not exist in the TensorFlow API; the intended
    dtype is ``tf.float32``.
    """

    # NOTE(review): presumably ``config_class`` / ``base_model_prefix`` /
    # ``main_input_name`` before the attribute names were mangled, so only the
    # last assignment survives on this class.
    __lowerCamelCase: Tuple = RegNetConfig
    __lowerCamelCase: int = 'regnet'
    __lowerCamelCase: str = 'pixel_values'

    @property
    def lowerCAmelCase__ ( self ):
        """Serving signature: one NCHW float32 image batch at 224x224."""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.float32 )}
UpperCamelCase__ = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCamelCase__ = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , __lowerCAmelCase , )
class _UpperCAmelCase ( __lowerCAmelCase ):
    # Bare RegNet backbone: thin wrapper around the serializable main layer.
    # NOTE(review): `__lowerCAmelCase` (base class / docstring arg) and the
    # `_UpperCamelCase` / `_CHECKPOINT_FOR_DOC` / `_CONFIG_FOR_DOC` /
    # `_EXPECTED_OUTPUT_SHAPE` names used below are not defined in this file —
    # mangled/lost in extraction; confirm against the original
    # `transformers` TFRegNetModel source before running.
    def __init__( self : List[Any] , a : RegNetConfig , *a : int , **a : List[Any] ):
        '''simple docstring'''
        # NOTE(review): duplicate `a` parameters make this signature invalid as
        # written; originally (config, *inputs, **kwargs).
        super().__init__(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
        lowercase_ : int = TFRegNetMainLayer(_UpperCamelCase , name="regnet" )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_UpperCamelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def lowerCAmelCase__ ( self : List[Any] , a : tf.Tensor , a : Optional[bool] = None , a : Optional[bool] = None , a : Tuple=False , ):
        '''simple docstring'''
        # Resolve output options against the model config when not given.
        lowercase_ : Any = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase_ : str = return_dict if return_dict is not None else self.config.use_return_dict
        # Delegate to the main layer and re-wrap the result.
        lowercase_ : List[str] = self.regnet(
            pixel_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , training=_UpperCamelCase , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , __lowerCAmelCase , )
class _UpperCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ):
    # RegNet backbone + (flatten, dense) classification head.
    # NOTE(review): `__lowerCAmelCase` and the `_UpperCamelCase` /
    # `_IMAGE_CLASS_CHECKPOINT` / `_CONFIG_FOR_DOC` / `_IMAGE_CLASS_EXPECTED_OUTPUT`
    # names used below are not defined in this file — mangled/lost in extraction;
    # confirm against the original `transformers` TFRegNetForImageClassification.
    def __init__( self : str , a : RegNetConfig , *a : Optional[Any] , **a : List[Any] ):
        '''simple docstring'''
        # NOTE(review): duplicate `a` parameters make this signature invalid as
        # written; originally (config, *inputs, **kwargs).
        super().__init__(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
        lowercase_ : Any = config.num_labels
        lowercase_ : Union[str, Any] = TFRegNetMainLayer(_UpperCamelCase , name="regnet" )
        # classification head
        lowercase_ : Union[str, Any] = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_UpperCamelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def lowerCAmelCase__ ( self : Optional[int] , a : tf.Tensor = None , a : tf.Tensor = None , a : bool = None , a : bool = None , a : List[Any]=False , ):
        '''simple docstring'''
        # Resolve output options from the config when not supplied.
        lowercase_ : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase_ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase_ : List[str] = self.regnet(
            _UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , training=_UpperCamelCase )
        # Pool, then apply the two-stage classifier head.
        lowercase_ : Dict = outputs.pooler_output if return_dict else outputs[1]
        lowercase_ : Any = self.classifier[0](_UpperCamelCase )
        lowercase_ : List[Any] = self.classifier[1](_UpperCamelCase )
        # Supervised loss is computed only when labels are provided.
        lowercase_ : List[Any] = None if labels is None else self.hf_compute_loss(labels=_UpperCamelCase , logits=_UpperCamelCase )
        if not return_dict:
            lowercase_ : Any = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=_UpperCamelCase , logits=_UpperCamelCase , hidden_states=outputs.hidden_states )
| 715
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
    # Enum-like scheduler constants (values 1..5).
    # NOTE(review): all five members collapsed onto one mangled name, so only
    # the final assignment (5) survives; the original member names were lost
    # in extraction and should be restored from the upstream source.
    __lowerCamelCase: int = 1
    __lowerCamelCase: List[Any] = 2
    __lowerCamelCase: Optional[Any] = 3
    __lowerCamelCase: int = 4
    __lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
    # Base output dataclass for Flax schedulers: a single ndarray field.
    # NOTE(review): the field name was mangled in extraction — presumably
    # ``prev_sample``; verify against the upstream source.
    __lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
    """
    Base mixin for Flax schedulers: config-driven construction, saving and
    compatible-scheduler discovery.

    Fix: the four methods all shared one mangled name (so only the last
    binding survived) and the classmethod signatures declared duplicate
    parameters (SyntaxError).  Names restored — ``_get_compatibles`` is
    grounded by the surviving ``self._get_compatibles()`` call.
    """

    # NOTE(review): presumably ``config_name`` / ``ignore_for_config`` /
    # ``_compatibles`` / ``has_compatibles`` before the attribute names were
    # mangled; ``_get_compatibles`` below still reads ``cls._compatibles``.
    __lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
    __lowerCamelCase: Optional[int] = ['dtype']
    __lowerCamelCase: int = []
    __lowerCamelCase: Dict = True

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path=None , subfolder=None , return_unused_kwargs=False , **kwargs ):
        """Instantiate a scheduler (and its state) from a saved configuration."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path ,
            subfolder=subfolder ,
            return_unused_kwargs=True ,
            **kwargs , )
        scheduler, unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )

        # Schedulers that carry explicit state create it eagerly here.
        if hasattr(scheduler , "create_state" ) and getattr(scheduler , "has_state" , False ):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained( self , save_directory , push_to_hub=False , **kwargs ):
        """Write this scheduler's configuration to ``save_directory``."""
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )

    @property
    def compatibles( self ):
        """Scheduler classes that can be swapped in for this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles( cls ):
        # Resolve compatible class names against the top-level package module.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split("." )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
def __SCREAMING_SNAKE_CASE(x, shape):
    """
    Right-pad ``x`` with singleton axes and broadcast it to ``shape``.

    Fix: the original declared the same mangled name for both parameters,
    which is a SyntaxError.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


# Restore the name the noise/velocity helpers below call.
broadcast_to_shape_from_left = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """
    Glide cosine ("squaredcos_cap_v2") beta schedule of length
    ``num_diffusion_timesteps``, with each beta capped at ``max_beta``.

    Fix: duplicate mangled parameter names (SyntaxError), undefined argument
    references in the loop body, and the nonexistent ``jnp.floataa`` dtype
    (``jnp.float32``).
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


# Restore the name the scheduler-state factory below calls.
betas_for_alpha_bar = __SCREAMING_SNAKE_CASE
@flax.struct.dataclass
class _UpperCAmelCase :
    """
    Precomputed alpha/beta tables shared by the Flax schedulers.

    Fix: the ``create`` body referenced ``scheduler`` and ``betas`` while the
    parameter and locals had been mangled to ``a`` / ``lowercase_``.
    """

    # NOTE(review): the three fields were presumably ``alphas``, ``betas`` and
    # ``alphas_cumprod`` before the field names were mangled — the classmethod
    # below still returns them via those keywords.
    __lowerCamelCase: jnp.ndarray
    __lowerCamelCase: jnp.ndarray
    __lowerCamelCase: jnp.ndarray

    @classmethod
    def lowerCAmelCase__ ( cls , scheduler ):
        """Build the state from ``scheduler.config`` (trained betas or a named schedule)."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def __SCREAMING_SNAKE_CASE(state, original_samples, noise, timesteps):
    """
    Return ``(sqrt(alpha_bar_t), sqrt(1 - alpha_bar_t))`` broadcast to the
    shape of ``original_samples`` for the given ``timesteps``.

    Fix: the original declared the same mangled name for all four parameters
    (a SyntaxError).  ``noise`` is accepted but unused, matching the shared
    signature of the callers below.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


# Restore the name the add-noise / velocity helpers below call.
get_sqrt_alpha_prod = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE(state, original_samples, noise, timesteps):
    """
    Forward-diffuse ``original_samples`` with ``noise`` at ``timesteps``.

    Fix: the original declared the same mangled name for all four parameters
    (a SyntaxError).
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


# Export a non-mangled public name for this routine.
add_noise_common = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE(state, sample, noise, timesteps):
    """
    Return the v-prediction target for ``sample`` and ``noise`` at ``timesteps``.

    Fix: the original declared the same mangled name for all four parameters
    (a SyntaxError).
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity


# Export a non-mangled public name for this routine.
get_velocity_common = __SCREAMING_SNAKE_CASE
| 640
| 0
|
'''simple docstring'''
from typing import Any
import numpy as np
def __SCREAMING_SNAKE_CASE(matrix):
    """
    Return True iff ``matrix`` equals its own conjugate transpose (is Hermitian).

    Fix: the body referenced the undefined ``_lowerCamelCase`` instead of the
    function's parameter.
    """
    return np.array_equal(matrix, matrix.conjugate().T)


# Restore the name the self-test below asserts with.
is_hermitian = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE(a, v):
    """
    Rayleigh quotient (v* A v) / (v* v) of Hermitian matrix ``a`` at vector ``v``.

    Fix: the original declared the same mangled name for both parameters (a
    SyntaxError) and the body referenced the undefined ``_lowerCamelCase``.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


# Restore the name the self-test below calls.
rayleigh_quotient = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE():
    """
    Smoke-test the Rayleigh quotient on a complex and a real Hermitian matrix.

    Fix: the locals were referenced via the undefined ``_lowerCamelCase``;
    restored to the matrix ``a`` and vector ``v`` the assertions format/use.
    """
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)


# Restore the name the __main__ guard calls.
tests = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `tests` is not defined under that name in this file (the
    # self-test routine above was mangled to `__SCREAMING_SNAKE_CASE`);
    # restore it or this call raises NameError.
    tests()
| 716
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE(graph):
    """
    Greedy APPROX-VERTEX-COVER: repeatedly pick the highest-degree vertex and
    delete its incident edges.  Returns the set of chosen vertices.

    Fix: the priority queue was mangled away — elements were pushed onto the
    ``graph`` argument and the loop referenced an unbound ``queue`` — and the
    popped vertex / index locals never got their names bound.
    """
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


# Restore the name the demo in the __main__ guard calls.
greedy_min_vertex_cover = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo: cover of a small undirected graph.
    # NOTE(review): the f-string below reads `graph`, but the dict is bound to
    # the mangled name `UpperCamelCase__`; likewise `greedy_min_vertex_cover`
    # must be restored from the mangled def above.  Both currently NameError.
    UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( _A ):
    """
    Configuration that composes an encoder config and a decoder config
    (``model_type == 'encoder-decoder'``).

    Fix: the ``__init__`` body unpacked the mangled (name-mangled-to-class-attr)
    ``__lowerCamelCase`` instead of its ``**kwargs``, assigned locals instead of
    ``self.encoder`` / ``self.decoder``, and the classmethod declared duplicate
    mangled parameters (a SyntaxError).
    """

    # NOTE(review): presumably ``model_type`` and ``is_composition`` before the
    # attribute names were mangled.
    __lowerCamelCase: str = 'encoder-decoder'
    __lowerCamelCase: Any = True

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder" )
        encoder_model_type = encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("decoder" )
        decoder_model_type = decoder_config.pop("model_type" )

        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def lowerCAmelCase__ ( cls , encoder_config , decoder_config , **kwargs ):
        """Build a composite config, marking the decoder as a cross-attending decoder."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def lowerCAmelCase__ ( self ):
        """Serialize to a plain dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        # NOTE(review): ``model_type`` is not defined on this class as written —
        # the attribute name above was mangled; confirm against upstream.
        output["model_type"] = self.__class__.model_type
        return output
| 717
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a deprecation warning at import time: this shim module only re-exports
# from `diffusers.pipelines` and is scheduled for removal in version 0.22.0.
deprecate(
    'pipelines_utils',
    '0.22.0',
    'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
    standard_warn=False,
    stacklevel=3,
)
| 640
| 0
|
'''simple docstring'''
import math
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , a : List[str]=0 ): # a graph with Node 0,1,...,N-1
'''simple docstring'''
lowercase_ : Tuple = n
lowercase_ : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
lowercase_ : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase__ ( self : Dict , a : Optional[int] , a : Union[str, Any] , a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = w
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowercase_ : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase__ ( self : int , a : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
UpperCamelCase__ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 718
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels for each GLUE task ('sts-b' is a regression task,
# hence a single output). The conversion function below looks this up by the
# name GLUE_TASKS_NUM_LABELS, so the dict must be bound to that name.
GLUE_TASKS_NUM_LABELS = {
    'cola': 2,
    'mnli': 3,
    'mrpc': 2,
    'sst-2': 2,
    'sts-b': 1,
    'qqp': 2,
    'qnli': 2,
    'rte': 2,
    'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch weights + config dump.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        bert_config_file: JSON config describing the XLNet architecture.
        pytorch_dump_folder_path: output directory for weights and config.
        finetuning_task: optional task name choosing the model head
            (GLUE task -> sequence classification, "squad" -> QA, else LM head).

    (Original definition reused one parameter name four times — a SyntaxError —
    and the script entry point below calls this function by its real name.)
    """
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse paths/options and run the conversion.
    # (`parser` and `args` were previously bound to `UpperCamelCase__`,
    # leaving the names actually used below undefined.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--xlnet_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained XLNet model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--finetuning_task',
        default=None,
        type=str,
        help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 640
| 0
|
'''simple docstring'''
def naive_cut_rod_recursive(n, prices):
    """Exponential-time recursive solution to the rod-cutting problem.

    Args:
        n: rod length (non-negative integer).
        prices: prices[i - 1] is the price of a piece of length i.

    Returns:
        Maximum revenue obtainable from a rod of length ``n``.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        # Sell a first piece of length i, recurse on the remaining n - i.
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue
def top_down_cut_rod(n, prices):
    """Memoized (top-down dynamic programming) rod-cutting solution: O(n^2).

    Args:
        n: rod length (non-negative integer).
        prices: prices[i - 1] is the price of a piece of length i.

    Returns:
        Maximum revenue obtainable from a rod of length ``n``.
    """
    _enforce_args(n, prices)
    # max_rev[i] caches the best revenue for a rod of length i; -inf == unknown.
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n, prices, max_rev):
    """Recursive helper for ``top_down_cut_rod``; memoizes results in ``max_rev``."""
    if max_rev[n] >= 0:
        # Already computed for this length.
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod(n, prices):
    """Iterative (bottom-up dynamic programming) rod-cutting solution: O(n^2).

    Args:
        n: rod length (non-negative integer).
        prices: prices[i - 1] is the price of a piece of length i.

    Returns:
        Maximum revenue obtainable from a rod of length ``n``.
    """
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            # Best of: keep current, or sell a piece of length j plus the
            # already-computed optimum for the remaining i - j.
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n, prices):
    """Validate rod-cutting inputs.

    Raises:
        ValueError: if ``n`` is negative or exceeds ``len(prices)``.
    """
    if n < 0:
        msg = f"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"""Got n = {n} but length of prices = {len(prices)}"""
        )
        raise ValueError(msg)
def main():
    """Smoke-test all three rod-cutting implementations against a known answer."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 719
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( number ):
    """Return True iff ``number`` is a power of two (0 is treated as True).

    Bit trick: a power of two has exactly one set bit, so ``number & (number - 1)``
    clears it and yields 0. (The parameter was previously named differently from
    the name used in the body, so every call raised NameError.)

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 640
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( _a , unittest.TestCase ):
    """Tokenization tests for DeBERTa (slow and fast tokenizers).

    NOTE(review): the name ``snake_case_`` referenced throughout the method
    bodies is never defined in this file (it looks like an automated-renaming
    artifact), so those calls would raise NameError at runtime — TODO confirm
    the originally intended variables. The same applies to other bare names
    such as ``tokenizer``, ``tokens``, ``tokd`` and ``encoding`` that are read
    without a visible assignment.
    """
    # Tokenizer class under test, flag enabling the rust-tokenizer tests,
    # and the fast tokenizer class, respectively.
    __lowerCamelCase: Dict = DebertaTokenizer
    __lowerCamelCase: Optional[int] = True
    __lowerCamelCase: Optional[Any] = DebertaTokenizerFast
    def lowerCAmelCase__ ( self : str ):
        '''Write a tiny BPE vocab/merges fixture into the temp dir for the tests.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase_ : Optional[Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        lowercase_ : List[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
        lowercase_ : List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase_ : Optional[Any] = {"unk_token": "[UNK]"}
        lowercase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(snake_case_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(snake_case_ ) )
    def lowerCAmelCase__ ( self : Union[str, Any] , **a : Optional[Any] ):
        '''Instantiate the tokenizer under test from the fixture directory.'''
        # NOTE(review): parameter is ``a`` but the body reads ``kwargs`` — mismatch.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
    def lowerCAmelCase__ ( self : Union[str, Any] , a : List[Any] ):
        '''Return an (input_text, output_text) pair for round-trip tests.'''
        lowercase_ : List[str] = "lower newer"
        lowercase_ : Any = "lower newer"
        return input_text, output_text
    def lowerCAmelCase__ ( self : int ):
        '''Check basic BPE tokenization and token-to-id conversion.'''
        lowercase_ : Optional[Any] = self.get_tokenizer()
        lowercase_ : List[str] = "lower newer"
        lowercase_ : Any = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        lowercase_ : Dict = tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        lowercase_ : Optional[int] = tokens + [tokenizer.unk_token]
        lowercase_ : str = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Check the token_type_ids produced for a sentence pair.'''
        lowercase_ : Union[str, Any] = self.get_tokenizer()
        lowercase_ : str = tokenizer("Hello" , "World" )
        lowercase_ : str = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"] , snake_case_ )
    @slow
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Check special-token handling against the pretrained checkpoint.'''
        lowercase_ : int = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
        lowercase_ : Tuple = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
        lowercase_ : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
        lowercase_ : Any = tokenizer.encode(
            "sequence builders" , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
        lowercase_ : int = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
        lowercase_ : List[str] = tokenizer.build_inputs_with_special_tokens(snake_case_ )
        lowercase_ : Any = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def lowerCAmelCase__ ( self : Dict ):
        '''Regression-check full encodings (ids / type-ids / mask) and decoding.'''
        lowercase_ : Tuple = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            lowercase_ : Optional[Any] = tokenizer_class.from_pretrained("microsoft/deberta-base" )
            lowercase_ : str = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            lowercase_ : Dict = tokenizer(snake_case_ , padding=snake_case_ )
            lowercase_ : List[str] = [tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) for seq in encoding["input_ids"]]
            # fmt: off
            lowercase_ : int = {
                "input_ids": [
                    [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            lowercase_ : List[Any] = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            self.assertDictEqual(encoding.data , snake_case_ )
            for expected, decoded in zip(snake_case_ , snake_case_ ):
                self.assertEqual(snake_case_ , snake_case_ )
| 720
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used to build the test tokenizer, and the fairseq ids
# of the PLBart language-code tokens. The test classes below reference these
# by name (SAMPLE_VOCAB / EN_CODE / PYTHON_CODE), so they must be bound to
# those names rather than a throwaway placeholder.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
    """PLBart tokenizer unit tests (base and multi language-code vocabularies).

    NOTE(review): the bare name ``a`` is used both as a method parameter and,
    in several places (e.g. ``PLBartTokenizer(a , ...)`` inside ``setUp``),
    as a module-level value that is never defined here — presumably the
    SAMPLE_VOCAB fixture path; every such use would raise NameError at
    runtime. TODO confirm the intended references.
    """
    # Tokenizer class under test; no fast tokenizer is exercised.
    __lowerCamelCase: Optional[int] = PLBartTokenizer
    __lowerCamelCase: Any = None
    __lowerCamelCase: Dict = False
    def lowerCAmelCase__ ( self : int ):
        '''Build a tokenizer from the SentencePiece fixture and save it to the temp dir.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Full-tokenizer test for the "base" language-code vocabulary.'''
        lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        lowercase_ : Dict = tokenizer.vocab_size
        lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
        self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
        lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : str = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
    def lowerCAmelCase__ ( self : int ):
        '''Full-tokenizer test for the "multi" language-code vocabulary.'''
        lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        lowercase_ : Dict = tokenizer.vocab_size
        lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
        self.assertListEqual(
            a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
        lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : List[Any] = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    """Integration tests for the pretrained uclanlp/plbart-python-en_XX tokenizer.

    NOTE(review): as elsewhere in this file, the bare name ``a`` appears both
    as a parameter and as an undefined free variable, and ``setUpClass``
    discards its results into locals instead of binding class attributes
    (``cls.tokenizer`` etc.) — these tests would not run as written.
    TODO confirm the intended bindings.
    """
    # Pretrained checkpoint plus source/target fixtures and the expected
    # fairseq token ids for the first source example.
    __lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
    __lowerCamelCase: Tuple = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    __lowerCamelCase: List[str] = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    __lowerCamelCase: List[str] = [
        134,
        5452,
        3_3460,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        988,
        20,
        3_3456,
        19,
        3_3456,
        771,
        39,
        4258,
        889,
        3318,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        2471,
        2,
        PYTHON_CODE,
    ]
    @classmethod
    def lowerCAmelCase__ ( cls : str ):
        '''Load the pretrained tokenizer once for the whole test class.'''
        lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        lowercase_ : List[str] = 1
        return cls
    def lowerCAmelCase__ ( self : int ):
        '''Check the fairseq ids of the language-code tokens.'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
    def lowerCAmelCase__ ( self : int ):
        '''Check batch encoding of the first source example against expected ids.'''
        lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , a )
    def lowerCAmelCase__ ( self : Tuple ):
        '''Check that decoding strips special tokens (incl. the language code).'''
        self.assertIn(a , self.tokenizer.all_special_ids )
        lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
        lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
        self.assertEqual(a , a )
        self.assertNotIn(self.tokenizer.eos_token , a )
    def lowerCAmelCase__ ( self : Dict ):
        '''Check truncation to max_length keeps EOS + language code at the end.'''
        lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
        self.assertIsInstance(src_text[0] , a )
        lowercase_ : Tuple = 1_0
        lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , a )
        self.assertEqual(len(a ) , a )
    def lowerCAmelCase__ ( self : Dict ):
        '''Check ids of the mask token and a language-code token.'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
    def lowerCAmelCase__ ( self : str ):
        '''Check the fairseq vocab survives a save/reload round-trip.'''
        lowercase_ : Optional[int] = tempfile.mkdtemp()
        lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(a )
        lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
    @require_torch
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Check batch tensors and shifted decoder inputs against fairseq.'''
        lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
        lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , a )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        '''Check padded/truncated seq2seq batch shapes and prefix/suffix tokens.'''
        lowercase_ : Union[str, Any] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(a , a )
        self.assertEqual((2, 2_6) , batch.input_ids.shape )
        self.assertEqual((2, 2_6) , batch.attention_mask.shape )
        lowercase_ : Dict = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , a )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def lowerCAmelCase__ ( self : int ):
        '''Check independent max_length handling for source vs. target text.'''
        lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
        lowercase_ : List[str] = self.tokenizer(
            text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
        lowercase_ : Dict = targets["input_ids"]
        lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        '''Check _build_translation_inputs output incl. forced_bos_token_id.'''
        lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(a ) , {
                # A, test, EOS, en_XX
                "input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 5_0_0_0_1,
            } , )
| 640
| 0
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    """Create a randomly initialized seq2seq model for ``config_name`` and save it.

    Builds the config (applying any overrides from ``config_kwargs``),
    instantiates an untrained model from it, and writes both the model and the
    matching tokenizer to ``save_dir``.

    (The original definition reused one parameter name three times — a
    SyntaxError — and ``fire.Fire`` below referenced this function by its
    real name, which was never defined.)

    Returns:
        The freshly initialized model.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    # fire exposes the function's parameters as CLI arguments.
    fire.Fire(save_randomly_initialized_version)
| 721
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure: symbols are resolved on first attribute access so that
# importing the package does not pull in heavy (torch) dependencies.
# (Previously the dict was bound to a placeholder name while `_import_structure`
# was referenced below, the torch branch rebound the wrong name instead of
# extending the structure, and the _LazyModule was never installed.)
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: expose only the config / feature-extractor symbols.
    pass
else:
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 0
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _UpperCAmelCase :
    """Test harness that builds FlaxGPTJ configs/inputs and checks cached decoding.

    NOTE(review): the name ``A_`` used throughout the method bodies is never
    defined in this file (automated-renaming artifact); each occurrence
    presumably stands for a specific local (e.g. the config, the attention
    mask, or ``max_decoder_length``) — TODO confirm before relying on these
    helpers. Likewise results assigned to throwaway ``lowercase_`` locals are
    read back under other names (``outputs_cache`` etc.).
    """
    def __init__( self : str , a : Union[str, Any] , a : Optional[Any]=1_4 , a : Any=7 , a : Union[str, Any]=True , a : Optional[int]=True , a : Optional[Any]=False , a : int=True , a : Any=9_9 , a : Dict=3_2 , a : str=4 , a : Any=4 , a : Optional[Any]=4 , a : List[Any]=3_7 , a : Optional[int]="gelu" , a : Dict=0.1 , a : Optional[int]=0.1 , a : Any=5_1_2 , a : int=0.02 , ):
        '''Store the model/test hyper-parameters on the tester instance.'''
        lowercase_ : Optional[int] = parent
        lowercase_ : str = batch_size
        lowercase_ : Optional[Any] = seq_length
        lowercase_ : int = is_training
        lowercase_ : List[Any] = use_input_mask
        lowercase_ : Union[str, Any] = use_token_type_ids
        lowercase_ : Union[str, Any] = use_labels
        lowercase_ : Any = vocab_size
        lowercase_ : Dict = hidden_size
        lowercase_ : str = rotary_dim
        lowercase_ : Any = num_hidden_layers
        lowercase_ : Optional[Any] = num_attention_heads
        lowercase_ : List[str] = intermediate_size
        lowercase_ : Optional[int] = hidden_act
        lowercase_ : List[Any] = hidden_dropout_prob
        lowercase_ : List[Any] = attention_probs_dropout_prob
        lowercase_ : Any = max_position_embeddings
        lowercase_ : List[str] = initializer_range
        lowercase_ : List[Any] = None
        # bos/eos/pad token ids all default to the last vocab index
        lowercase_ : Tuple = vocab_size - 1
        lowercase_ : int = vocab_size - 1
        lowercase_ : Dict = vocab_size - 1
    def lowerCAmelCase__ ( self : str ):
        '''Build a GPTJConfig plus random input ids and (optionally) an attention mask.'''
        lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ : Optional[int] = None
        if self.use_input_mask:
            lowercase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ : Optional[Any] = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def lowerCAmelCase__ ( self : Dict ):
        '''Adapt prepare_config_and_inputs to the (config, inputs_dict) convention.'''
        lowercase_ : int = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ : Tuple = config_and_inputs
        lowercase_ : Tuple = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def lowerCAmelCase__ ( self : Dict , a : Any , a : List[str] , a : List[str] , a : Tuple ):
        '''Check that incremental decoding with a KV cache matches a full forward pass.'''
        lowercase_ : Union[str, Any] = 2_0
        lowercase_ : Optional[Any] = model_class_name(A_ )
        lowercase_ : Dict = model.init_cache(input_ids.shape[0] , A_ )
        lowercase_ : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
        lowercase_ : str = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        lowercase_ : int = model(
            input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , )
        lowercase_ : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        lowercase_ : Optional[Any] = model(
            input_ids[:, -1:] , attention_mask=A_ , past_key_values=outputs_cache.past_key_values , position_ids=A_ , )
        lowercase_ : Optional[Any] = model(A_ )
        lowercase_ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def lowerCAmelCase__ ( self : str , a : Optional[int] , a : Optional[int] , a : Tuple , a : Union[str, Any] ):
        '''Same cached-decoding check, but with an explicit padded attention mask.'''
        lowercase_ : str = 2_0
        lowercase_ : List[str] = model_class_name(A_ )
        lowercase_ : int = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        lowercase_ : List[str] = model.init_cache(input_ids.shape[0] , A_ )
        lowercase_ : List[Any] = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        lowercase_ : List[Any] = model(
            input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , )
        lowercase_ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        lowercase_ : List[str] = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=A_ , position_ids=A_ , )
        lowercase_ : List[str] = model(A_ , attention_mask=A_ )
        lowercase_ : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class _UpperCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
    """Test suite for the Flax GPT-J models.

    NOTE(review): the ``_lowercase`` base classes and the ``A_`` arguments used
    throughout are not defined anywhere in this file, every method shares the
    name ``lowerCAmelCase__`` (so each definition shadows the previous one), and
    the two class attributes share the name ``__lowerCamelCase`` — all apparent
    artifacts of an automated rename; restore the original names before use.
    """

    __lowerCamelCase: List[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    __lowerCamelCase: Optional[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def lowerCAmelCase__ ( self : Any ):
        """Build the shared model tester fixture."""
        lowercase_ : List[Any] = FlaxGPTJModelTester(self )

    def lowerCAmelCase__ ( self : Dict ):
        """Exercise the cached-decoding fast path for every model class."""
        for model_class_name in self.all_model_classes:
            lowercase_ , lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(A_ , A_ , A_ , A_ )

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """Exercise cached decoding with an explicit attention mask."""
        for model_class_name in self.all_model_classes:
            lowercase_ , lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                A_ , A_ , A_ , A_ )

    @tooslow
    def lowerCAmelCase__ ( self : Dict ):
        """Generate from the pretrained 6B checkpoint and compare against fixed expected strings."""
        lowercase_ : int = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        lowercase_ : str = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=A_ , truncation=A_ )
        lowercase_ : str = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        lowercase_ : str = False
        lowercase_ : Any = model.config.eos_token_id
        # JIT-compile generation for speed; only the token ids are compared.
        lowercase_ : Dict = jax.jit(model.generate )
        lowercase_ : str = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
        lowercase_ : str = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
        lowercase_ : List[str] = [
            "Hello this is a long string of text.\n\nI\'m trying to get the text of the",
            "Hey, I\'m a little late to the party. I\'m going to",
        ]
        self.assertListEqual(A_ , A_ )

    @is_pt_flax_cross_test
    def lowerCAmelCase__ ( self : List[Any] ):
        """Check PT->Flax weight conversion: outputs must match and survive a save/reload round trip."""
        lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                lowercase_ : Dict = self._prepare_for_class(A_ , A_ )
                lowercase_ : Dict = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                lowercase_ : List[Any] = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                lowercase_ : List[Any] = getattr(A_ , A_ )
                lowercase_ , lowercase_ : Any = pt_inputs["input_ids"].shape
                # Randomly mask a suffix of each row so padding handling is covered too.
                lowercase_ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(A_ ):
                    lowercase_ : Union[str, Any] = 0
                    lowercase_ : Dict = 1
                    lowercase_ : Dict = 0
                    lowercase_ : Optional[Any] = 1
                lowercase_ : Optional[int] = pt_model_class(A_ ).eval()
                lowercase_ : List[Any] = model_class(A_ , dtype=jnp.floataa )
                lowercase_ : Any = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , A_ )
                lowercase_ : Union[str, Any] = fx_state
                with torch.no_grad():
                    lowercase_ : int = pt_model(**A_ ).to_tuple()
                lowercase_ : Union[str, Any] = fx_model(**A_ ).to_tuple()
                self.assertEqual(len(A_ ) , len(A_ ) , "Output lengths differ between Flax and PyTorch" )
                # Only the final position is compared, with a loose 4e-2 tolerance.
                for fx_output, pt_output in zip(A_ , A_ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(A_ )
                    lowercase_ : Optional[Any] = model_class.from_pretrained(A_ , from_pt=A_ )
                    lowercase_ : List[Any] = fx_model_loaded(**A_ ).to_tuple()
                    self.assertEqual(
                        len(A_ ) , len(A_ ) , "Output lengths differ between Flax and PyTorch" )
                    for fx_output_loaded, pt_output in zip(A_ , A_ ):
                        self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @is_pt_flax_cross_test
    def lowerCAmelCase__ ( self : List[str] ):
        """Check Flax->PT weight conversion: outputs must match and survive a save/reload round trip."""
        lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                lowercase_ : Union[str, Any] = self._prepare_for_class(A_ , A_ )
                lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                lowercase_ : Dict = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                lowercase_ : Tuple = getattr(A_ , A_ )
                lowercase_ : Optional[int] = pt_model_class(A_ ).eval()
                lowercase_ : str = model_class(A_ , dtype=jnp.floataa )
                lowercase_ : Union[str, Any] = load_flax_weights_in_pytorch_model(A_ , fx_model.params )
                lowercase_ , lowercase_ : Tuple = pt_inputs["input_ids"].shape
                # Randomly mask a suffix of each row so padding handling is covered too.
                lowercase_ : Tuple = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(A_ ):
                    lowercase_ : Any = 0
                    lowercase_ : Optional[Any] = 1
                    lowercase_ : Union[str, Any] = 0
                    lowercase_ : List[Any] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    lowercase_ : Dict = pt_model(**A_ ).to_tuple()
                lowercase_ : Union[str, Any] = fx_model(**A_ ).to_tuple()
                self.assertEqual(len(A_ ) , len(A_ ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(A_ , A_ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(A_ )
                    lowercase_ : Any = pt_model_class.from_pretrained(A_ , from_flax=A_ )
                    with torch.no_grad():
                        lowercase_ : int = pt_model_loaded(**A_ ).to_tuple()
                    self.assertEqual(
                        len(A_ ) , len(A_ ) , "Output lengths differ between Flax and PyTorch" )
                    for fx_output, pt_output in zip(A_ , A_ ):
                        self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )

    @tooslow
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """Smoke test: each model class loads from the pretrained checkpoint and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            lowercase_ : Any = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
            lowercase_ : Optional[int] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(A_ )
| 700
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Names of the learning-rate schedules that `get_scheduler` can build.

    The string values are what callers pass to `get_scheduler(name, ...)`;
    `TYPE_TO_SCHEDULER_FUNCTION` below maps each member to its factory.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: The optimizer whose learning rate to schedule.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` whose multiplicative factor is always 1.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule that warms up linearly from 0, then stays constant.

    Args:
        optimizer: The optimizer whose learning rate to schedule.
        num_warmup_steps: Number of steps over which the rate ramps from 0 to 1x.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` implementing the warmup-then-constant factor.
    """

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            # max(1.0, ...) guards against division by zero when warmup is 0.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Create a piecewise-constant schedule from a rule string.

    Args:
        optimizer: The optimizer whose learning rate to schedule.
        step_rules: Comma-separated rules ``"step:multiple,...,final_multiple"``,
            e.g. ``"1:10,20:0.1,0.01"``: multiple 10 while step < 1, then 0.1
            while step < 20, then 0.01 for all remaining steps.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` applying the rule-table multiple for the current step.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    # Trailing entry (no colon) is the multiple used past the last threshold.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with linear warmup followed by linear decay to 0.

    Args:
        optimizer: The optimizer whose learning rate to schedule.
        num_warmup_steps: Steps over which the rate ramps from 0 to 1x.
        num_training_steps: Total steps; the rate reaches 0 at this step.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` implementing the triangular warmup/decay factor.
    """

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Clamp at 0 so steps past num_training_steps do not go negative.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
) -> LambdaLR:
    """Create a schedule with linear warmup followed by cosine decay.

    Args:
        optimizer: The optimizer whose learning rate to schedule.
        num_warmup_steps: Steps over which the rate ramps from 0 to 1x.
        num_training_steps: Total steps defining the cosine's horizontal extent.
        num_cycles: Number of cosine half-waves; the default 0.5 decays from 1x
            to 0 following half a cosine period.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` implementing the warmup-then-cosine factor.
    """

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    """Create a schedule with linear warmup and cosine decay with hard restarts.

    After warmup, the factor follows a cosine from 1x down to 0 and snaps back
    to 1x at the start of each of the `num_cycles` cycles.

    Args:
        optimizer: The optimizer whose learning rate to schedule.
        num_warmup_steps: Steps over which the rate ramps from 0 to 1x.
        num_training_steps: Total steps; the factor is 0 from here on.
        num_cycles: Number of hard restarts within the decay phase.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` implementing the restarting-cosine factor.
    """

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at the beginning of each cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    lr_end: float = 1e-7,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Create a schedule with linear warmup and polynomial decay down to `lr_end`.

    Args:
        optimizer: The optimizer whose learning rate to schedule.
        num_warmup_steps: Steps over which the rate ramps from 0 to the initial lr.
        num_training_steps: Total steps; the rate stays at `lr_end` afterwards.
        lr_end: Final absolute learning rate; must be below the optimizer's
            initial lr.
        power: Polynomial power (1.0 gives linear decay).
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` whose factor, multiplied by the initial lr, yields the
        polynomially decayed rate.

    Raises:
        ValueError: If `lr_end` is not smaller than the optimizer's initial lr.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table used by `get_scheduler` to map a SchedulerType to its factory.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point: build any schedule from its name.

    Args:
        name: Schedule name (string or `SchedulerType` member).
        optimizer: The optimizer whose learning rate to schedule.
        step_rules: Rule string, only used by the piecewise-constant schedule.
        num_warmup_steps: Warmup steps; required by every schedule except
            constant and piecewise-constant.
        num_training_steps: Total steps; required by the decaying schedules.
        num_cycles: Cycle count, only used by cosine-with-restarts.
        power: Polynomial power, only used by the polynomial schedule.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        The constructed `LambdaLR` scheduler.

    Raises:
        ValueError: If a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 640
| 0
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """Parse TensorFlow benchmark arguments and run the benchmark.

    The second `parse_args_into_dataclasses` call reproduces any deprecation
    `ValueError` so its message can be rewritten: `--no_xxx` flags that merely
    changed spelling get a "use --no-xxx" hint, while genuinely unknown flags
    are re-raised as errors.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # The parser puts the offending flags in a list literal at the end of
        # the message; eval turns it back into a Python list.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 701
|
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from UK coins (Project Euler 31).

    Classic bottom-up coin-change DP: for each coin, extend the count of
    combinations for every amount it can contribute to.

    Args:
        pence: Target amount in pence (default 200 = £2).

    Returns:
        The number of distinct coin combinations summing to `pence`.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # Start at `coin`: smaller amounts cannot use this coin.
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 640
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
UpperCamelCase__ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return the GPT-2/BART byte -> printable-unicode-character mapping.

    Printable bytes map to themselves; the remaining bytes are mapped to
    characters starting at U+0100 so every byte has a visible, reversible
    representation. Cached because the table never changes.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    Args:
        word: A tuple of symbols (variable-length strings) representing one word.

    Returns:
        A set of ``(left, right)`` tuples, one per adjacent pair.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _UpperCAmelCase ( snake_case ):
    """Byte-level BPE tokenizer (BART-style, derived from GPT-2).

    NOTE(review): this class is heavily damaged by an automated rename — every
    parameter is called ``a`` (duplicate parameter names are a SyntaxError),
    every assignment target is ``lowercase_`` while the code later reads
    ``self.encoder``, ``self.bpe_ranks`` etc., and all methods share the name
    ``lowerCAmelCase__``. The comments below describe the evident intent;
    restore the original names before use.
    """

    __lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
    __lowerCamelCase: Dict = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase: List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase: int = ['input_ids', 'attention_mask']

    def __init__( self : str , a : Optional[int] , a : Dict , a : Dict="replace" , a : Tuple="<s>" , a : Any="</s>" , a : Union[str, Any]="</s>" , a : Tuple="<s>" , a : int="<unk>" , a : List[Any]="<pad>" , a : Any="<mask>" , a : Tuple=False , **a : List[str] , ):
        """Load vocab/merges files and set up the special tokens and BPE tables."""
        # Wrap plain-string special tokens in AddedToken so strip behavior is explicit.
        lowercase_ : List[str] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
        lowercase_ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
        lowercase_ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
        lowercase_ : Dict = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
        lowercase_ : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
        lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase_ : Optional[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
        super().__init__(
            errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
        # token -> id table from the JSON vocab file, plus its inverse.
        with open(a , encoding="utf-8" ) as vocab_handle:
            lowercase_ : Tuple = json.load(a )
        lowercase_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
        lowercase_ : Optional[Any] = errors  # how to handle errors in decoding
        # byte <-> printable-unicode maps used for byte-level BPE.
        lowercase_ : Any = bytes_to_unicode()
        lowercase_ : int = {v: k for k, v in self.byte_encoder.items()}
        # merges file: one merge rule per line, first line is a version header.
        with open(a , encoding="utf-8" ) as merges_handle:
            lowercase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
        lowercase_ : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges]
        lowercase_ : Tuple = dict(zip(a , range(len(a ) ) ) )
        lowercase_ : List[Any] = {}  # per-token BPE result cache
        lowercase_ : Tuple = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowercase_ : int = re.compile(R"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    def lowerCAmelCase__ ( self : str ):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder )

    def lowerCAmelCase__ ( self : Dict ):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def lowerCAmelCase__ ( self : Optional[Any] , a : Dict ):
        """Apply BPE merges to a single pre-tokenized token; results are cached."""
        if token in self.cache:
            return self.cache[token]
        lowercase_ : int = tuple(a )
        lowercase_ : Tuple = get_pairs(a )
        if not pairs:
            return token
        while True:
            # Pick the lowest-ranked (earliest-learned) pair still present.
            lowercase_ : int = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase_ , lowercase_ : Optional[int] = bigram
            lowercase_ : int = []
            lowercase_ : Any = 0
            # Rebuild the word, merging every occurrence of the chosen pair.
            while i < len(a ):
                try:
                    lowercase_ : Any = word.index(a , a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase_ : Union[str, Any] = j
                if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase_ : Tuple = tuple(a )
            lowercase_ : int = new_word
            if len(a ) == 1:
                break
            else:
                lowercase_ : str = get_pairs(a )
        lowercase_ : Dict = " ".join(a )
        lowercase_ : Dict = word
        return word

    def lowerCAmelCase__ ( self : Tuple , a : str ):
        """Split text with the pre-tokenizer regex, byte-encode, then apply BPE."""
        lowercase_ : Optional[int] = []
        for token in re.findall(self.pat , a ):
            lowercase_ : Union[str, Any] = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(" " ) )
        return bpe_tokens

    def lowerCAmelCase__ ( self : str , a : str ):
        """Convert a token string to its id (unk id when unknown)."""
        return self.encoder.get(a , self.encoder.get(self.unk_token ) )

    def lowerCAmelCase__ ( self : Any , a : Dict ):
        """Convert an id back to its token string."""
        return self.decoder.get(a )

    def lowerCAmelCase__ ( self : List[Any] , a : Union[str, Any] ):
        """Join tokens and reverse the byte-level encoding back into text."""
        lowercase_ : str = "".join(a )
        lowercase_ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text

    def lowerCAmelCase__ ( self : List[str] , a : str , a : Union[str, Any] = None ):
        """Write vocab.json and merges.txt into `save_directory`; return both paths."""
        if not os.path.isdir(a ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowercase_ : Union[str, Any] = os.path.join(
            a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        lowercase_ : Optional[int] = os.path.join(
            a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(a , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + "\n" )
        lowercase_ : Tuple = 0
        with open(a , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges must be written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    lowercase_ : Any = token_index
                writer.write(" ".join(a ) + "\n" )
                index += 1
        return vocab_file, merge_file

    def lowerCAmelCase__ ( self : Union[str, Any] , a : Dict , a : List[Any] = None ):
        """Build model input: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase_ : Optional[int] = [self.cls_token_id]
        lowercase_ : List[str] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[Any] , a : int = None , a : int = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
        if token_ids_a is None:
            return [1] + ([0] * len(a )) + [1]
        return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]

    def lowerCAmelCase__ ( self : Union[str, Any] , a : List[Any] , a : Any = None ):
        """Token type ids are all zeros — BART does not use segment embeddings."""
        lowercase_ : List[Any] = [self.sep_token_id]
        lowercase_ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : Tuple=False , **a : List[Any] ):
        """Optionally prepend a space so the first word is BPE-encoded like mid-sentence words."""
        lowercase_ : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
            lowercase_ : Dict = " " + text
        return (text, kwargs)
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
    """Dataset reader that builds a `Dataset` (or streaming dataset) from JSON files.

    NOTE(review): parameters are all named ``a`` (duplicate parameter names are a
    SyntaxError) and assignment targets are ``lowercase_`` while later code reads
    ``self.builder`` etc. — artifacts of an automated rename; restore before use.
    """

    def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
        """Store reader options and construct the underlying `Json` builder."""
        super().__init__(
            a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
        # `field` selects a sub-key of each JSON document to read rows from.
        lowercase_ : str = field
        # Normalize a bare path into a {split: path} mapping.
        lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
        lowercase_ : Any = Json(
            cache_dir=a , data_files=a , features=a , field=a , **a , )

    def lowerCAmelCase__ ( self : Optional[int] ):
        """Materialize the dataset: streaming view, or download-and-prepare then load."""
        if self.streaming:
            lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            lowercase_ : Dict = None
            lowercase_ : Optional[int] = None
            lowercase_ : str = None
            lowercase_ : str = None
            self.builder.download_and_prepare(
                download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
            lowercase_ : int = self.builder.as_dataset(
                split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
        return dataset
class _UpperCAmelCase :
    """Serializer that writes a `Dataset` to JSON (lines or other orients),
    optionally in parallel batches.

    NOTE(review): parameters are all named ``a`` (duplicate parameter names are a
    SyntaxError) and assignment targets are ``lowercase_`` while later code reads
    ``self.dataset`` etc. — artifacts of an automated rename; restore before use.
    """

    def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
        """Validate options and remember dataset, destination, batch size and workers."""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        lowercase_ : Dict = dataset
        lowercase_ : Optional[int] = path_or_buf
        lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        lowercase_ : Optional[Any] = num_proc
        lowercase_ : List[Any] = "utf-8"
        lowercase_ : List[str] = to_json_kwargs

    def lowerCAmelCase__ ( self : Optional[int] ):
        """Resolve pandas `to_json` kwargs and write to a path or an open buffer.

        Returns the number of bytes written.
        """
        lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
        lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
        # JSON-Lines is the natural default only for the "records" orient.
        lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
        lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
        lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
                lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
        else:
            # Compression cannot be applied when the caller supplied an open buffer.
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead." )
            lowercase_ : Dict = self._write(
                file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
        return written

    def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
        """Serialize one batch (args tuple: offset, orient, lines, index, kwargs) to encoded JSON bytes."""
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
        lowercase_ : Optional[int] = query_table(
            table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
        lowercase_ : Dict = batch.to_pandas().to_json(
            path_or_buf=a , orient=a , lines=a , index=a , **a )
        # Guarantee a trailing newline so concatenated batches stay line-delimited.
        if not json_str.endswith("\n" ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
        """Write all batches to `file_obj`, sequentially or via a process pool.

        Returns the total number of bytes written.
        """
        lowercase_ : Union[str, Any] = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(a )
        else:
            lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
            # imap preserves batch order, so parallel output matches sequential output.
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                    written += file_obj.write(a )
        return written
| 640
| 0
|
'''Top-level initialization of the `datasets` package: version constant and
fail-fast checks on the Python and pyarrow versions.'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
# NOTE(review): presumably this constant was `__version__` before an automated rename; verify.
UpperCamelCase__ = """2.13.1"""

import platform

import pyarrow
from packaging import version

# Refuse to import on interpreter / pyarrow versions the package cannot run on.
if version.parse(platform.python_version()) < version.parse('3.7'):
    raise ImportWarning(
        'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
    )
if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
        'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
    )

# The helpers above are only needed for the checks; keep the namespace clean.
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules: re-expose relocated classes on their old import paths so
# that `datasets.arrow_dataset.concatenate_datasets`, `datasets.utils.Download*`
# and `datasets.utils.download_manager.Download*` keep working.
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

# The module aliases were only needed for the attribute patches above.
del _arrow_dataset, _utils, _deprecated_download_manager
| 703
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Plain top-down recursion without memoization (exponential time); kept as
    the baseline for the memoized and bottom-up variants below.

    Args:
        rows: Number of rows in `mat`.
        cols: Number of columns in `mat`.
        mat: Binary matrix as a list of row lists.
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: outside the matrix contributes no square.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # Square at (row, col) extends as far as its three neighbors allow.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # One-element list so the nested function can update the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Top-down recursion memoized in `dp_array` (-1 marks an uncomputed cell),
    giving O(rows * cols) time.

    Args:
        rows: Number of rows in `mat`.
        cols: Number of columns in `mat`.
        mat: Binary matrix as a list of row lists.
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        # Reuse the memoized answer when this cell was already solved.
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows, cols, mat):
    """Largest square of 1s in ``mat`` — iterative bottom-up DP, O(rows*cols).

    Fixes: duplicate parameter names (SyntaxError) and lost assignment targets
    are restored from use-sites; the function name is restored from the
    ``__main__`` call below (`largest_square_area_in_matrix_bottom_up`).

    :param rows: number of rows in ``mat``
    :param cols: number of columns in ``mat``
    :param mat: binary matrix (list of lists of 0/1)
    :return: side length of the largest all-ones square (0 if none)
    """
    # One extra row/column of zeros avoids bounds checks at the edges.
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def __SCREAMING_SNAKE_CASE(rows, cols, mat):
    """Largest square of 1s in ``mat`` — bottom-up DP using only two rows (O(cols) space).

    Fixes:
    * duplicate parameter names (SyntaxError) and lost assignment targets are
      restored from the body's use-sites;
    * ``next_row = current_row`` made the two rows ALIASES of one list, so the
      "diagonal" lookup read this row's freshly written value instead of the
      previous row's (e.g. [[1,1,1],[1,0,1],[1,1,1]] yielded 2 instead of 1).
      A copy is taken instead.

    :param rows: number of rows in ``mat``
    :param cols: number of columns in ``mat``
    :param mat: binary matrix (list of lists of 0/1)
    :return: side length of the largest all-ones square (0 if none)
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy, not alias: next iteration must read this row's *finished* values
        # while writing fresh ones into current_row.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the function defs above were renamed by obfuscation, so
    # `largest_square_area_in_matrix_bottom_up` is undefined in this file as-is
    # and this line raises NameError until that original name is restored.
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 640
| 0
|
from collections.abc import Iterable
from typing import Any
class Node:
    """A binary-search-tree node: a value plus parent/left/right links.

    The class name is restored from the ``Node(...)`` constructor call and the
    ``Node | None`` annotations in the tree class below.
    """

    def __init__(self, value: "int | None" = None) -> None:
        self.value = value
        # Parent link kept so deletion can re-wire the tree from below.
        self.parent: "Node | None" = None  # Added in order to delete a node easier
        self.left: "Node | None" = None
        self.right: "Node | None" = None

    def __repr__(self) -> str:
        """Leaf nodes render as their value; inner nodes as a nested mapping."""
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"""{self.value}""": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    """Binary search tree keyed on ``Node.value`` (ties go to the right subtree).

    Restoration notes: obfuscation collapsed every method onto one name,
    leaving all internal calls (``self.__insert``, ``self.search``, ...) and
    the drivers below (``t.insert``/``t.remove``/``t.get_max``/``t.get_min``)
    dangling. Method and class names are restored from those call sites.
    """

    def __init__(self, root: "Node | None" = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: "Node", new_children: "Node | None") -> None:
        """Splice ``new_children`` into ``node``'s position (helper for remove)."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: "Node") -> bool:
        """Return True when ``node`` is its parent's right child."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """Insert ``value`` as a new leaf, descending from the root."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> "Node | None":
        """Return the node holding ``value`` (None if absent).

        Raises IndexError on an empty tree, mirroring the original contract.
        """
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another." )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            # (was `node.value is not value`: identity compare breaks for
            # equal-but-distinct objects, e.g. ints outside CPython's cache)
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: "Node | None" = None) -> "Node | None":
        """Right-most node of the subtree rooted at ``node`` (whole tree by default)."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: "Node | None" = None) -> "Node | None":
        """Left-most node of the tree (``node`` argument kept for symmetry)."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: "Node | None"):
        """Yield nodes in pre-order (root, left, right)."""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """Traverse with ``traversal_function`` (defaults to pre-order from root)."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: "Node | None") -> None:
        """Append the subtree's values to ``arr`` in sorted (in-order) order."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: "Node") -> int:
        """Return the k-th smallest (1-based) value in the subtree at ``node``."""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    """Return the nodes of the subtree at ``curr_node`` in post-order (left, right, root).

    Fixes: the obfuscated signature did not bind ``curr_node`` and the
    recursive calls named ``postorder`` — both restored from the body.

    :param curr_node: subtree root (anything with ``.left``/``.right``) or None
    :return: list of node objects, post-order; [] for an empty subtree
    """
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def __SCREAMING_SNAKE_CASE():
    """Demo driver: build a BinarySearchTree, query it, then empty it.

    Fixes: the loop bodies referenced the undefined ``_lowerCamelCase`` where
    the loop variable ``i`` (and the tree ``t``) were clearly intended.
    Purely side-effecting (prints); requires ``BinarySearchTree`` and ``Node``.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists" )
    else:
        print("The value 6 doesn't exist" )
    if t.search(-1) is not None:
        print("The value -1 exists" )
    else:
        print("The value -1 doesn't exist" )
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod(verbose=True)
| 704
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# Lightweight stand-in for the argparse namespace consumed by `TestCommand`.
# Field order mirrors the `datasets-cli test` flags; `defaults` maps onto the
# rightmost eight fields, so only `dataset` must be supplied by the caller.
UpperCamelCase__ = namedtuple(
    '_TestCommandArgs',
    [
        'dataset',
        'name',
        'cache_dir',
        'data_dir',
        'all_configs',
        'save_infos',
        'ignore_verifications',
        'force_redownload',
        'clear_cache',
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    """True when ``source`` is within 1% of ``target`` (relative error).

    Fixes: the obfuscated signature repeated one parameter name (SyntaxError);
    names restored from the body, function name restored from the
    ``is_apercent_close(...)`` call in the test below.
    Note: divides by ``target`` — a zero target raises ZeroDivisionError.
    """
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE(dataset_loading_script_dir):
    """Integration test: run `datasets-cli test --save_infos` on the dummy
    dataset script and compare the regenerated infos with expected values.

    Fixes: obfuscation dropped most assignment targets (``args``,
    ``dataset_infos``, ``expected_dataset_infos``, ``result, expected`` ...)
    — restored from their use-sites — and the final ``result == expected``
    was a bare comparison whose value was silently discarded; it now asserts.
    NOTE(review): ``all_configs``/``save_infos`` were obfuscated to the path
    argument; ``True`` is presumably the original value — confirm upstream.
    """
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md" )
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string" ) ),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
                        "langs": Sequence(Value("string" ) ),
                        "spans": Sequence(Value("string" ) ),
                    } ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # Byte counts drift slightly between runs; 1% tolerance.
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # Was `result == expected` with no assert — the check never ran.
            assert result == expected
| 640
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase__ = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """Fraction of positions where ``preds`` equals ``labels`` (numpy element-wise).

    Fixes: duplicate parameter names (SyntaxError); names restored from the
    body, function name restored from the ``simple_accuracy`` call in
    ``compute_metrics`` below.
    """
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments for selecting the pretrained model/config/tokenizer.

    Restoration notes: obfuscation collapsed all four field names into one
    mangled identifier and replaced ``default=None`` with a function object.
    Field names are restored from their use-sites (``model_args.config_name``
    etc. in ``main``); the class name from the ``HfArgumentParser`` call.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
@dataclass
class DataTrainingArguments:
    """Arguments for the task data fed to training/evaluation.

    Restoration notes: field names recovered from use-sites
    (``data_args.task_name``/``data_dir``/``max_seq_length``/``overwrite_cache``
    in ``main``); ``overwrite_cache``'s default was obfuscated — ``False`` is
    presumably the original value.
    """

    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )})
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
    """Fine-tune/evaluate a multiple-choice model (HF example-script layout).

    Restoration notes: obfuscation dropped every assignment target and the
    argument unpacking; names (``model_args``/``data_args``/``training_args``,
    ``processor``, ``config``, ``tokenizer``, ``model``, ``trainer`` ...) are
    restored from their use-sites, ``fpaa`` is restored to ``fp16`` (digits
    were mangled), and the function name is restored from the ``main()`` call
    sites below.

    :return: dict of evaluation metrics (empty when ``--do_eval`` is off)
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    # Was assigned to a single variable while the body used all three names.
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome." )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: pad to multiples of 8 only for fp16 tensor-core efficiency.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt" )
        if trainer.is_world_master():
            with open(output_eval_file, "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    return results
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Multiprocess entry point; the positional argument is ignored.

    NOTE(review): presumably the ``_mp_fn(index)`` hook used by TPU launchers
    such as ``xla_spawn.py`` — confirm against the original script. Requires a
    module-level ``main``, which the obfuscated file does not define under
    that name.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): requires a module-level `main`; the obfuscated file does
    # not define one under that name, so this raises NameError as-is.
    main()
| 705
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def create_inputs(input_types):
    """Build one dummy input per requested type; nested lists recurse.

    Fixes: the parameter was not bound under the name the body uses
    (``input_types``), the recursive call named ``create_inputs``, and the
    list check was ``isinstance(x, x)`` (a TypeError) — all restored from
    use-sites; the check is now ``isinstance(input_type, list)``.

    :param input_types: iterable of "text" | "image" | "audio" or nested lists
    :return: list of dummy inputs (str / PIL image / torch tensor / sub-list)
    :raises ValueError: on an unknown type tag
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type, list):
            # A nested list yields a nested list of inputs.
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs
def output_types(outputs):
    """Map each output object to its agent type tag ("text" | "image" | "audio").

    Fixes: the parameter was not bound under the name the body uses
    (``outputs``); the function name is restored from the
    ``output_types(...)`` call in the tool tests below.

    :raises ValueError: when an output matches none of the known agent types
    """
    type_tags = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            type_tags.append("text" )
        elif isinstance(output, (Image.Image, AgentImage)):
            type_tags.append("image" )
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            type_tags.append("audio" )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return type_tags
@is_tool_test
class _UpperCAmelCase :
    # Mixin exercising a `self.tool` attribute supplied by the concrete test case.
    # NOTE(review): obfuscation collapsed all four test methods onto ONE name,
    # so later defs shadow earlier ones and only the last is callable; several
    # body names (`inputs`, `outputs`, `authorized_types`, `a`, `_inputs`) lost
    # their binding sites and are dangling as written. Confirm against the
    # original `transformers` ToolTesterMixin before relying on this class.
    def lowerCAmelCase__ ( self : List[Any] ):
        """Check the tool declares `inputs`/`outputs` drawn from the authorized types."""
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )
        lowercase_ : Optional[Any] = self.tool.inputs  # NOTE(review): body reads `inputs`
        for _input in inputs:
            if isinstance(_input , a ):  # NOTE(review): `a` is unbound here
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        lowercase_ : Any = self.tool.outputs  # NOTE(review): body reads `outputs`
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def lowerCAmelCase__ ( self : Any ):
        """Call the tool on dummy inputs and check the output types match its spec."""
        lowercase_ : List[str] = create_inputs(self.tool.inputs )
        lowercase_ : List[str] = self.tool(*a )  # NOTE(review): should forward the inputs
        # There is a single output
        if len(self.tool.outputs ) == 1:
            lowercase_ : Union[str, Any] = [outputs]
        self.assertListEqual(output_types(a ) , self.tool.outputs )
    def lowerCAmelCase__ ( self : List[str] ):
        """Check description/default_checkpoint metadata conventions."""
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
    def lowerCAmelCase__ ( self : Any ):
        """Check outputs are instances of the mapped agent types."""
        lowercase_ : Any = create_inputs(self.tool.inputs )
        lowercase_ : str = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
        for output, output_type in zip(a , self.tool.outputs ):
            lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(a , a ) )
    def lowerCAmelCase__ ( self : Tuple ):
        """Check the tool also accepts inputs pre-wrapped in agent types."""
        lowercase_ : Dict = create_inputs(self.tool.inputs )
        lowercase_ : Optional[int] = []
        for _input, input_type in zip(a , self.tool.inputs ):
            if isinstance(a , a ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        lowercase_ : Any = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : Any = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 0
|
'''simple docstring'''
import pytest
UpperCamelCase__ = '__dummy_dataset1__'
UpperCamelCase__ = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    """Fixture: name of the dummy dataset loading script.

    Renamed (presumably to its original fixture name) so the directory
    fixture below can request it by parameter name — pytest wires fixtures
    by name.
    """
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    """Fixture: source code of the dummy dataset loading script.

    Renamed (presumably to its original fixture name) so the directory
    fixture below can request it by parameter name.
    """
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(tmp_path, dataset_loading_script_name, dataset_loading_script_code):
    """Fixture: write the dummy loading script under ``tmp_path`` and return its dir.

    Fixes: the obfuscated signature repeated one parameter name three times
    (a SyntaxError) and collapsed several distinct locals onto ``snake_case__``.
    Parameter names are restored from the body's use-sites (pytest resolves
    fixtures by parameter name, so they must match the fixtures above).
    """
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"""{script_name}.py"""
    with open(script_path, "w" ) as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 706
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    """Round a (h, w) latent size up so each side covers a whole number of
    ``scale_factor**2`` pixels, returning the size in units of ``scale_factor``.

    Fixes: duplicate parameter names (SyntaxError); names restored from the
    body (``h``/``w``), function name restored from the ``get_new_h_w`` call
    inside the pipeline's ``__call__``.

    :return: tuple ``(new_h * scale_factor, new_w * scale_factor)``
    """
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class _UpperCAmelCase ( snake_case ):
    # Text-to-image diffusion pipeline in the Kandinsky style: MultilingualCLIP
    # text encoder + XLM-Roberta tokenizer + UNet denoiser + MoVQ latent decoder.
    #
    # NOTE(review): obfuscation collapsed distinct parameters onto `a` — several
    # signatures below repeat `a`, which is a *SyntaxError* (duplicate argument)
    # — and dropped most assignment targets onto `lowercase_`, so later reads
    # (`latents`, `prompt_embeds`, `noise_pred`, ...) are dangling. The original
    # names must be restored from the diffusers source before this class can run.
    def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
        """Register the five sub-models and cache the MoVQ spatial downscale factor."""
        super().__init__()
        self.register_modules(
            text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
        # 2 ** (#blocks - 1) = pixels-per-latent downscaling of the MoVQ decoder.
        lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
        """Draw (or validate) the initial latents and scale by the scheduler's init noise sigma."""
        if latents is None:
            lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            lowercase_ : Optional[int] = latents.to(a )
        lowercase_ : str = latents * scheduler.init_noise_sigma
        return latents
    def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
        """Encode the prompt (and negatives for classifier-free guidance);
        returns (prompt_embeds, text_encoder_hidden_states, text_mask)."""
        lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
        # get prompt text embeddings
        lowercase_ : Any = self.tokenizer(
            a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
        lowercase_ : Union[str, Any] = text_inputs.input_ids
        lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
            lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
        lowercase_ : List[str] = text_input_ids.to(a )
        lowercase_ : int = text_inputs.attention_mask.to(a )
        lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
            input_ids=a , attention_mask=a )
        # Duplicate embeddings per requested image (num_images_per_prompt).
        lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
        lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
        lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : List[str]
            if negative_prompt is None:
                lowercase_ : int = [""] * batch_size
            elif type(a ) is not type(a ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
                    f""" {type(a )}.""" )
            elif isinstance(a , a ):
                lowercase_ : Tuple = [negative_prompt]
            elif batch_size != len(a ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`." )
            else:
                lowercase_ : Dict = negative_prompt
            lowercase_ : str = self.tokenizer(
                a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
            lowercase_ : List[Any] = uncond_input.input_ids.to(a )
            lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
            lowercase_ , lowercase_ : int = self.text_encoder(
                input_ids=a , attention_mask=a )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase_ : List[str] = negative_prompt_embeds.shape[1]
            lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
            lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
            lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
            lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
            lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , a , -1 )
            lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
            lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
        """Offload each sub-model to CPU via accelerate's `cpu_offload` (sequential)."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        lowercase_ : str = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(a , a )
    def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
        """Hook-based model offload (accelerate >= 0.17): each model moves to GPU
        only while it runs; the final hook is kept for manual offload."""
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=a )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        lowercase_ : List[str] = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
        if self.safety_checker is not None:
            lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
        # We'll offload the last model manually.
        lowercase_ : Dict = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__ ( self : Tuple ):
        """Device the pipeline actually executes on (honors accelerate hooks)."""
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(a )
    def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
        """Denoising loop: encode prompt, prepare latents, iterate scheduler steps
        with classifier-free guidance, then decode latents through MoVQ."""
        if isinstance(a , a ):
            lowercase_ : List[str] = 1
        elif isinstance(a , a ):
            lowercase_ : int = len(a )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
        lowercase_ : Tuple = self._execution_device
        lowercase_ : Dict = batch_size * num_images_per_prompt
        lowercase_ : Dict = guidance_scale > 1.0
        lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
            a , a , a , a , a )
        if isinstance(a , a ):
            lowercase_ : Optional[int] = torch.cat(a , dim=0 )
        if isinstance(a , a ):
            lowercase_ : int = torch.cat(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=a )
        self.scheduler.set_timesteps(a , device=a )
        lowercase_ : List[str] = self.scheduler.timesteps
        lowercase_ : str = self.unet.config.in_channels
        lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
        # create initial latent
        lowercase_ : str = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
        for i, t in enumerate(self.progress_bar(a ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            lowercase_ : Optional[int] = self.unet(
                sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
            if do_classifier_free_guidance:
                lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
                lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
                # Standard CFG combine: uncond + scale * (text - uncond).
                lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ : Tuple = self.scheduler.step(
                a , a , a , generator=a , ).prev_sample
        # post-processing
        lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] model output into [0, 1] image range.
            lowercase_ : List[Any] = image * 0.5 + 0.5
            lowercase_ : Optional[int] = image.clamp(0 , 1 )
            lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase_ : List[str] = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
UpperCamelCase__ = False
class _UpperCAmelCase ( unittest.TestCase ):
    """Empty placeholder: the module's fast (non-slow) suite defines no tests."""
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    def lowerCAmelCase__ ( self : Tuple ) -> None:
        """Integration test: run VersatileDiffusion image variation on the 'benz'
        fixture image with a fixed seed and compare a 3x3 output slice against
        reference values.

        Fixes: the original bound every intermediate to the throwaway name
        ``lowercase_`` and then read undefined names (``pipe``, ``image_slice``,
        ``expected_slice``) and the undefined mangled name ``__lowerCAmelCase``;
        the ``Optional[int]`` return annotation also referenced an unimported
        ``Optional``.
        """
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # Move to the test device and silence the progress bar for CI logs.
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=init_image , generator=generator , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        # Loose tolerance: GPU nondeterminism across drivers.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( height , width , scale_factor=8 ):
    """Round ``(height, width)`` up to the smallest sizes whose latent grid
    (``size // scale_factor**2``) covers the requested resolution.

    Returns the latent grid size multiplied back by ``scale_factor``.

    Fixes: the original repeated the parameter name ``_UpperCamelCase`` (a
    SyntaxError) and bound the quotients to a throwaway name while incrementing
    the undefined ``new_height``/``new_width``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1  # round up when height is not an exact multiple
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# Readable alias: the pipeline class below calls `downscale_height_and_width`.
downscale_height_and_width = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( pil_image , w=512 , h=512 ):
    """Convert a PIL image to a normalized ``(1, 3, h, w)`` float tensor in [-1, 1].

    Fixes: the original repeated the parameter name (SyntaxError), discarded the
    resized image, and used ``np.floataa``, which does not exist (``np.float32``
    intended).
    """
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("RGB" ) )
    # Map uint8 [0, 255] -> float32 [-1, 1].
    arr = arr.astype(np.float32 ) / 127.5 - 1
    # HWC -> CHW, then add the batch dimension.
    arr = np.transpose(arr , [2, 0, 1] )
    return torch.from_numpy(arr ).unsqueeze(0 )


# Readable alias: the pipeline class below calls `prepare_image`.
prepare_image = __SCREAMING_SNAKE_CASE
class _UpperCAmelCase ( snake_case ):
    """Kandinsky-2.2-style image-to-image decoder pipeline.

    Combines a ``UNetaDConditionModel``, a ``DDPMScheduler`` and a MoVQ
    ``VQModel``: the input image is encoded to MoVQ latents, noised up to an
    intermediate timestep (controlled by ``strength``), denoised under image
    embedding conditioning, and decoded back to pixels.

    NOTE(review): this block appears machine-mangled — several signatures repeat
    the parameter name ``a`` (a SyntaxError in Python) and most locals are bound
    to the throwaway name ``lowercase_`` while later statements still read the
    original names (``init_latents``, ``noise_pred``, ``timesteps``, ...). It
    cannot run as written; the comments below describe the apparent intent only.
    """

    # NOTE(review): duplicate parameter name `a` — apparently (unet, scheduler, movq).
    def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
        '''Register the sub-modules and cache the MoVQ spatial scale factor.'''
        super().__init__()
        self.register_modules(
            unet=a , scheduler=a , movq=a , )
        # 2^(encoder levels - 1): how much MoVQ shrinks each spatial dimension.
        lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
        '''Img2img timestep clipping: keep only the last num_inference_steps*strength steps.'''
        # NOTE(review): reads `num_inference_steps`/`strength`/`init_timestep`/`t_start`,
        # none of which are bound under the mangled names.
        lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
        lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
        lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
        '''Encode the input image to MoVQ latents (unless it is already a
        4-channel latent tensor) and noise them to the starting timestep.'''
        if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
        lowercase_ : str = image.to(device=a , dtype=a )
        # Effective batch = prompts * images-per-prompt.
        lowercase_ : Any = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # Input is already in latent space.
            lowercase_ : str = image
        else:
            if isinstance(a , a ) and len(a ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(a , a ):
                # Per-sample generators: encode each image slice with its own generator.
                lowercase_ : str = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
                ]
                lowercase_ : List[Any] = torch.cat(a , dim=0 )
            else:
                lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
            # Scale latents as the MoVQ config prescribes.
            lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
        lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
        lowercase_ : List[Any] = init_latents.shape
        lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
        # get latents: noise the clean latents to the chosen starting timestep.
        lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
        lowercase_ : Tuple = init_latents
        return latents

    def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
        '''Sequential CPU offload of the UNet and MoVQ via accelerate's cpu_offload.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
        lowercase_ : Optional[Any] = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(a , a )

    def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
        '''Hook-based model CPU offload (requires accelerate >= 0.17.0.dev0).'''
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase_ : Optional[int] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
        # We'll offload the last model manually.
        lowercase_ : List[Any] = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__ ( self : int ):
        '''Device the UNet actually executes on (accounts for accelerate hooks).'''
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(a )
    # NOTE(review): the decorator argument `a` is undefined at class-body level;
    # presumably the module-level example docstring constant was intended.
    def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
        '''Run img2img generation; returns ImagePipelineOutput or an (image,) tuple.'''
        lowercase_ : Optional[int] = self._execution_device
        # Classifier-free guidance is active whenever guidance_scale > 1.
        lowercase_ : Dict = guidance_scale > 1.0
        if isinstance(a , a ):
            lowercase_ : Dict = torch.cat(a , dim=0 )
        lowercase_ : Dict = image_embeds.shape[0]
        if isinstance(a , a ):
            lowercase_ : str = torch.cat(a , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image, then stack [negative, positive].
            lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [image]
        if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support  PIL image and pytorch tensor""" )
        # Preprocess all inputs to a single (N, 3, H, W) tensor.
        lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
        lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
        # Encode pixels to MoVQ latents.
        lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
        lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
        self.scheduler.set_timesteps(a , device=a )
        lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
        lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        # Latent-grid size compatible with the MoVQ downscale factor.
        lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
        lowercase_ : Tuple = self.prepare_latents(
            a , a , a , a , image_embeds.dtype , a , a )
        for i, t in enumerate(self.progress_bar(a ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase_ : int = {"image_embeds": image_embeds}
            lowercase_ : Optional[int] = self.unet(
                sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
            if do_classifier_free_guidance:
                # Split off the learned-variance channels, then mix the
                # unconditional/conditional noise predictions.
                lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
                lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
                lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not expect a variance channel: drop it.
                lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ : Dict = self.scheduler.step(
                a , a , a , generator=a , )[0]
        # post-processing: decode latents back to pixel space.
        lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] -> [0, 1] and move channels last for numpy/PIL output.
            lowercase_ : Tuple = image * 0.5 + 0.5
            lowercase_ : Any = image.clamp(0 , 1 )
            lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase_ : Tuple = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
import sys
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Matrix-chain multiplication dynamic program (CLRS 15.2).

    _UpperCamelCase: dimension array ``p`` where matrix ``A_i`` is
    ``p[i-1] x p[i]`` (so ``len(p) - 1`` matrices, indexed from 1).

    Returns ``(matrix, sol)``: ``matrix[a][b]`` is the minimum number of scalar
    multiplications to compute ``A_a .. A_b``; ``sol[a][b]`` is the optimal
    split point. Row/column 0 are unused padding.

    Fixes: the original body read the undefined name ``UpperCAmelCase__`` where
    the parameter was meant, and discarded ``b``/``cost``/the ``sys.maxsize``
    initialization into a throwaway name.
    """
    array = _UpperCamelCase
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


# Readable alias: main() below calls `matrix_chain_order`.
matrix_chain_order = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if i == j:
print("A" + str(UpperCAmelCase__ ) , end=" " )
else:
print("(" , end=" " )
print_optiomal_solution(UpperCAmelCase__ , UpperCAmelCase__ , optimal_solution[i][j] )
print_optiomal_solution(UpperCAmelCase__ , optimal_solution[i][j] + 1 , UpperCAmelCase__ )
print(")" , end=" " )
def __SCREAMING_SNAKE_CASE ( ):
    """Driver: build the DP tables for the example dimension array and print the
    minimal multiplication count plus the optimal parenthesization."""
    lowercase_ : int = [30, 35, 15, 5, 10, 20, 25]
    lowercase_ : Union[str, Any] = len(UpperCAmelCase__ )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    # NOTE(review): `UpperCAmelCase__`, `matrix_chain_order`,
    # `print_optiomal_solution`, `matrix` and `n` are all undefined here as
    # written (the helpers above are named `__SCREAMING_SNAKE_CASE`), so this
    # driver raises NameError — apparently a casualty of name mangling.
    lowercase_ : str = matrix_chain_order(UpperCAmelCase__ )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(UpperCAmelCase__ , 1 , n - 1 )


if __name__ == "__main__":
    # NOTE(review): `main` is also undefined in this file; the driver above is
    # named `__SCREAMING_SNAKE_CASE`.
    main()
| 708
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
    """Pix2Struct processor: wraps a ``Pix2StructImageProcessor`` and a T5
    tokenizer into one callable that prepares model inputs from images and/or
    text.

    NOTE(review): machine-mangled — the three class attributes below all share
    the name ``__lowerCamelCase`` (later assignments overwrite earlier ones) and
    ``__init__``/``__call__`` repeat the parameter name ``a``, which is a
    SyntaxError. Comments describe apparent intent only.
    """
    # Apparently: attributes list, image_processor_class, tokenizer_class.
    __lowerCamelCase: str = ['image_processor', 'tokenizer']
    __lowerCamelCase: Dict = 'Pix2StructImageProcessor'
    __lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')

    # NOTE(review): duplicate parameter `a` — apparently (image_processor, tokenizer).
    def __init__( self : str , a : Dict , a : List[str] ):
        '''Store the two sub-processors on the mixin; the False flag is
        presumably tokenizer.return_token_type_ids — TODO confirm.'''
        lowercase_ : Optional[Any] = False
        super().__init__(a , a )

    def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
        '''Prepare model inputs.

        Text-only (non-VQA): tokenize and return the text encoding directly.
        Otherwise: run the image processor (with header text in VQA mode) and,
        for non-VQA with text, merge the tokenized text into the image encoding.
        '''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            lowercase_ : Dict = self.tokenizer
            lowercase_ : Tuple = self.tokenizer(
                text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
            # NOTE(review): `text_encoding` is never bound under the mangled names.
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            lowercase_ : Optional[int] = self.image_processor(
                a , return_tensors=a , max_patches=a , **a )
        else:
            # add pixel_values and bbox
            lowercase_ : Any = self.image_processor(
                a , return_tensors=a , max_patches=a , header_text=a , **a )
        if text is not None and not self.image_processor.is_vqa:
            lowercase_ : int = self.tokenizer(
                text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
            # Rename the text encoding's keys (apparently to decoder_* fields)
            # before merging into the image-processor output.
            if "attention_mask" in text_encoding:
                lowercase_ : str = text_encoding.pop("attention_mask" )
            if "input_ids" in text_encoding:
                lowercase_ : Dict = text_encoding.pop("input_ids" )
        else:
            lowercase_ : str = None
        if text_encoding is not None:
            encoding_image_processor.update(a )
        return encoding_image_processor

    def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
        '''Forward all arguments to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*a , **a )

    def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
        '''Forward all arguments to the tokenizer's decode.'''
        return self.tokenizer.decode(*a , **a )

    @property
    def lowerCAmelCase__ ( self : str ):
        '''Combined, de-duplicated model input names from tokenizer and image processor.'''
        lowercase_ : Tuple = self.tokenizer.model_input_names
        lowercase_ : Dict = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 640
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( a__ ):
    # NOTE(review): the base name `a__` is undefined in this file — presumably a
    # ConfigTester base class whose name was mangled.
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """Instantiate the config from the test inputs and check that it exposes
        the MobileViT-specific attributes.

        Fix: the original passed the undefined global ``lowerCAmelCase__`` to
        ``hasattr`` and discarded the freshly built config into a throwaway
        name; the config instance is what must be inspected.
        """
        config = self.config_class(**self.inputs_dict )
        for attribute in ("hidden_sizes", "neck_hidden_sizes", "num_attention_heads"):
            self.parent.assertTrue(hasattr(config , attribute ) )
class _UpperCAmelCase :
    """Test helper that builds small MobileViT configs/inputs and runs shape
    checks against the base, image-classification and semantic-segmentation
    heads.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``a`` (a SyntaxError) and every method binds locals to ``lowercase_`` while
    reading the original names (``parent``, ``config``, ``model``, ...) or the
    undefined ``lowerCAmelCase__``. Comments describe apparent intent only.
    """
    def __init__( self : List[str] , a : Optional[int] , a : List[Any]=1_3 , a : List[Any]=3_2 , a : List[str]=2 , a : List[Any]=3 , a : Optional[Any]=6_4_0 , a : Optional[int]=4 , a : Optional[int]="silu" , a : Optional[int]=3 , a : List[Any]=3_2 , a : Tuple=0.1 , a : Any=0.1 , a : List[Any]=0.1 , a : List[Any]=0.02 , a : int=True , a : Optional[Any]=True , a : Optional[Any]=1_0 , a : Union[str, Any]=None , ):
        '''Record the hyperparameters used to build tiny test configs.'''
        lowercase_ : List[Any] = parent
        lowercase_ : Tuple = batch_size
        lowercase_ : int = image_size
        lowercase_ : Tuple = patch_size
        lowercase_ : Tuple = num_channels
        lowercase_ : str = last_hidden_size
        lowercase_ : List[str] = num_attention_heads
        lowercase_ : Optional[int] = hidden_act
        lowercase_ : Dict = conv_kernel_size
        lowercase_ : Any = output_stride
        lowercase_ : Tuple = hidden_dropout_prob
        lowercase_ : Union[str, Any] = attention_probs_dropout_prob
        lowercase_ : str = classifier_dropout_prob
        lowercase_ : List[Any] = use_labels
        lowercase_ : Dict = is_training
        lowercase_ : int = num_labels
        lowercase_ : Tuple = initializer_range
        lowercase_ : Optional[int] = scope

    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Build random pixel values plus (optional) classification and
        per-pixel segmentation labels, and the matching config.'''
        lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase_ : int = None
        lowercase_ : List[str] = None
        if self.use_labels:
            lowercase_ : int = ids_tensor([self.batch_size] , self.num_labels )
            lowercase_ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowercase_ : Dict = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Tiny MobileViTConfig assembled from the recorded hyperparameters.'''
        return MobileViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[str] , a : Tuple , a : List[Any] ):
        '''Check the base model's last_hidden_state shape: channels =
        last_hidden_size, spatial dims reduced by output_stride.'''
        lowercase_ : Tuple = MobileViTModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        lowercase_ : Any = model(lowerCAmelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def lowerCAmelCase__ ( self : int , a : List[Any] , a : Optional[int] , a : List[Any] , a : Tuple ):
        '''Check the classification head emits (batch, num_labels) logits.'''
        lowercase_ : Optional[int] = self.num_labels
        lowercase_ : Optional[Any] = MobileViTForImageClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        lowercase_ : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCAmelCase__ ( self : int , a : Optional[int] , a : Dict , a : Union[str, Any] , a : List[str] ):
        '''Check the segmentation head's logits shape with and without labels.'''
        lowercase_ : List[str] = self.num_labels
        lowercase_ : List[Any] = MobileViTForSemanticSegmentation(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        lowercase_ : Optional[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        lowercase_ : List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def lowerCAmelCase__ ( self : List[str] ):
        '''Repackage prepare_config_and_inputs() for the common test mixin.'''
        lowercase_ : Tuple = self.prepare_config_and_inputs()
        lowercase_ : Any = config_and_inputs
        lowercase_ : Dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( a__ , a__ , unittest.TestCase ):
    """Common-test suite for the MobileViT model family.

    NOTE(review): machine-mangled — both base names are the undefined ``a__``
    (presumably ModelTesterMixin / PipelineTesterMixin), all flag attributes
    share the name ``__lowerCamelCase`` (later assignments overwrite earlier
    ones), and methods reference the undefined ``lowerCAmelCase__`` where
    locals were meant. Comments describe apparent intent only.
    """
    # Apparently: all_model_classes, pipeline_model_mapping, then four
    # test-skip flags (each assignment overwrites the previous attribute).
    __lowerCamelCase: str = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    __lowerCamelCase: List[Any] = (
        {
            """feature-extraction""": MobileViTModel,
            """image-classification""": MobileViTForImageClassification,
            """image-segmentation""": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __lowerCamelCase: Optional[int] = False
    __lowerCamelCase: List[str] = False
    __lowerCamelCase: List[str] = False
    __lowerCamelCase: Any = False

    def lowerCAmelCase__ ( self : Any ):
        '''Set up the model tester and config tester.'''
        # NOTE(review): `MobileViTModelTester` / `MobileViTConfigTester` are not
        # defined in this file (the helper classes above are named
        # `_UpperCAmelCase`), so this raises NameError as written.
        lowercase_ : Optional[Any] = MobileViTModelTester(self )
        lowercase_ : Union[str, Any] = MobileViTConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )

    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds" )
    def lowerCAmelCase__ ( self : Dict ):
        '''Skipped: model has no inputs_embeds path.'''
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings" )
    def lowerCAmelCase__ ( self : Tuple ):
        '''Skipped: model has no embedding tying.'''
        pass

    @unittest.skip(reason="MobileViT does not output attentions" )
    def lowerCAmelCase__ ( self : int ):
        '''Skipped: model emits no attention maps.'''
        pass

    def lowerCAmelCase__ ( self : str ):
        '''Check that every model class's forward signature starts with pixel_values.'''
        lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Union[str, Any] = model_class(lowerCAmelCase__ )
            lowercase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : int = [*signature.parameters.keys()]
            lowercase_ : Union[str, Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def lowerCAmelCase__ ( self : Tuple ):
        '''Skipped: common-test model is currently too large.'''
        pass

    def lowerCAmelCase__ ( self : List[Any] ):
        '''Base-model shape check via the model tester.'''
        lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Check hidden_states: 5 stages, spatial dims halved each stage until
        the configured output_stride is reached.'''
        def check_hidden_states_output(a : Optional[Any] , a : Tuple , a : Optional[int] ):
            lowercase_ : Optional[int] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                lowercase_ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            lowercase_ : Any = outputs.hidden_states
            lowercase_ : int = 5
            self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowercase_ : int = 2
            for i in range(len(lowerCAmelCase__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Dict = True
            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase_ : Optional[Any] = True
            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    def lowerCAmelCase__ ( self : int ):
        '''Classification-head shape check via the model tester.'''
        lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )

    def lowerCAmelCase__ ( self : str ):
        '''Segmentation-head shape check via the model tester.'''
        lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Smoke test: load the first pretrained checkpoint from the hub.'''
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : str = MobileViTModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( ):
    """Load and return the COCO cats fixture image used by the integration tests.

    Fix: the original bound the opened image to a throwaway name and then
    returned the undefined name ``image``.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )


# Readable alias: the integration tests below call `prepare_img`.
prepare_img = __SCREAMING_SNAKE_CASE
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests against pretrained MobileViT checkpoints.

    NOTE(review): machine-mangled — methods bind results to ``lowercase_`` and
    then read ``lowerCAmelCase__``/``model``/``outputs``/``logits`` etc., which
    are undefined as written. Comments describe apparent intent only.
    """
    @cached_property
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Image processor for apple/mobilevit-xx-small (None without vision deps).'''
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None

    @slow
    def lowerCAmelCase__ ( self : List[str] ):
        '''Classification: check (1, 1000) logits and a 3-value reference slice.'''
        lowercase_ : List[str] = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowerCAmelCase__ )
        lowercase_ : Any = self.default_image_processor
        lowercase_ : Dict = prepare_img()
        lowercase_ : List[str] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
        # forward pass
        with torch.no_grad():
            lowercase_ : List[str] = model(**lowerCAmelCase__ )
        # verify the logits
        lowercase_ : Any = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
        lowercase_ : Optional[Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(lowerCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )

    @slow
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Segmentation: check (1, 21, 32, 32) logits against a reference block.'''
        lowercase_ : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        lowercase_ : int = model.to(lowerCAmelCase__ )
        lowercase_ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        lowercase_ : Optional[int] = prepare_img()
        lowercase_ : Any = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
        # forward pass
        with torch.no_grad():
            lowercase_ : Tuple = model(**lowerCAmelCase__ )
        lowercase_ : Union[str, Any] = outputs.logits
        # verify the logits
        lowercase_ : Dict = torch.Size((1, 2_1, 3_2, 3_2) )
        self.assertEqual(logits.shape , lowerCAmelCase__ )
        lowercase_ : List[str] = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ] , device=lowerCAmelCase__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )

    @slow
    def lowerCAmelCase__ ( self : Dict ):
        '''Post-processing: semantic maps resized to target_sizes or native 32x32.'''
        lowercase_ : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        lowercase_ : Dict = model.to(lowerCAmelCase__ )
        lowercase_ : Optional[Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        lowercase_ : Union[str, Any] = prepare_img()
        lowercase_ : Tuple = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
        # forward pass
        with torch.no_grad():
            lowercase_ : Tuple = model(**lowerCAmelCase__ )
        lowercase_ : Union[str, Any] = outputs.logits.detach().cpu()
        lowercase_ : int = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ , target_sizes=[(5_0, 6_0)] )
        lowercase_ : Tuple = torch.Size((5_0, 6_0) )
        self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
        lowercase_ : Tuple = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ )
        lowercase_ : Optional[int] = torch.Size((3_2, 3_2) )
        self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
| 709
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
    '''CLIP image-processor fixture using the standard CLIP normalization stats.'''
    # NOTE(review): `a` placeholders (do_center_crop/do_normalize/do_resize) are
    # obfuscation damage — presumably all True upstream; confirm.
    lowercase_ : List[str] = CLIPImageProcessor(
        crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
    return image_processor
def lowerCAmelCase__ ( self : List[str] ):
    '''Assemble the dict of dummy components consumed by the prior pipeline.'''
    # NOTE(review): the `lowercase_` rebindings below discard each fixture while
    # the dict references `prior`, `image_encoder`, etc. — obfuscation damage;
    # confirm against the upstream test file.
    lowercase_ : Any = self.dummy_prior
    lowercase_ : Optional[Any] = self.dummy_image_encoder
    lowercase_ : List[Any] = self.dummy_text_encoder
    lowercase_ : Any = self.dummy_tokenizer
    lowercase_ : Optional[Any] = self.dummy_image_processor
    # fixed_small_log variance + sample prediction mirrors the UnCLIP prior setup
    lowercase_ : List[str] = UnCLIPScheduler(
        variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
    lowercase_ : List[Any] = {
        "prior": prior,
        "image_encoder": image_encoder,
        "text_encoder": text_encoder,
        "tokenizer": tokenizer,
        "scheduler": scheduler,
        "image_processor": image_processor,
    }
    return components
def lowerCAmelCase__ ( self , device , seed=0 ):
    '''Build the standard kwargs dict for a dummy pipeline invocation.

    Fixes the original signature, which declared two parameters with the same
    name (`a`) — a SyntaxError — and never bound the names (`generator`,
    `inputs`) its own body relied on.

    Args:
        device: torch device string/object the generator should live on.
        seed: RNG seed for reproducible sampling (default 0).

    Returns:
        dict of keyword arguments for the pipeline under test.
    '''
    if str(device ).startswith("mps" ):
        # MPS does not support device-local generators; fall back to global seeding.
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        "prompt": "horse",
        "generator": generator,
        "guidance_scale": 4.0,
        "num_inference_steps": 2,
        "output_type": "np",
    }
    return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
    '''End-to-end CPU smoke test: run the pipeline twice (dict and tuple return
    paths) and compare the last 10 image-embedding values to a golden slice.'''
    # NOTE(review): `lowercase_` rebindings discard intermediates the later lines
    # use (`pipe`, `image`, `image_from_tuple`, ...) — obfuscation damage.
    lowercase_ : str = "cpu"
    lowercase_ : Any = self.get_dummy_components()
    lowercase_ : int = self.pipeline_class(**a )
    lowercase_ : Any = pipe.to(a )
    pipe.set_progress_bar_config(disable=a )
    lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
    lowercase_ : List[Any] = output.image_embeds
    lowercase_ : str = pipe(
        **self.get_dummy_inputs(a ) , return_dict=a , )[0]
    # compare only the trailing 10 values of the first embedding row
    lowercase_ : Any = image[0, -1_0:]
    lowercase_ : Dict = image_from_tuple[0, -1_0:]
    assert image.shape == (1, 3_2)
    lowercase_ : int = np.array(
        [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
    # 1e-2 tolerance: golden values recorded at float32 precision
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
    '''Batch-vs-single-inference consistency check (skipped on Apple MPS).'''
    # NOTE(review): the three flags (test_max_difference / relax_max_difference /
    # test_mean_pixel_difference) were collapsed to `a` by obfuscation.
    lowercase_ : int = torch_device == "cpu"
    lowercase_ : Tuple = True
    lowercase_ : str = False
    self._test_inference_batch_single_identical(
        test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
    '''Attention-slicing forward-pass equivalence check (skipped on Apple MPS).'''
    # NOTE(review): flag names collapsed to `a` by obfuscation; max-difference
    # checking is enabled only on CPU, pixel-difference checking disabled.
    lowercase_ : Any = torch_device == "cpu"
    lowercase_ : int = False
    self._test_attention_slicing_forward_pass(
        test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 0
|
'''Lazy import module for the YOLOS model family.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Map each submodule to the public names it exports; consumed by _LazyModule.
# Fix: the original bound every structure to the same throwaway name
# (`UpperCamelCase__`), so `_import_structure` was never defined or populated.
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_yolos'] = [
        'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'YolosForObjectDetection',
        'YolosModel',
        'YolosPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    # ... while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710
|
'''Next-greatest-element: sample data shared by the implementations below.'''
from __future__ import annotations

# Fix: both lists were bound to the same throwaway name (`UpperCamelCase__`),
# while the demo/timeit code below references `arr`. `expect` is the expected
# next-greatest-element result for `arr`.
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    # Fix: the setup string was bound to a throwaway name (`UpperCamelCase__`)
    # but the timeit calls below reference `setup`.
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
    print(
        'next_greatest_element_slow():',
        timeit('next_greatest_element_slow(arr)', setup=setup),
    )
    print(
        'next_greatest_element_fast():',
        timeit('next_greatest_element_fast(arr)', setup=setup),
    )
    print(
        ' next_greatest_element():',
        timeit('next_greatest_element(arr)', setup=setup),
    )
| 640
| 0
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
lowercase_ : List[str] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(_snake_case )
if number < 1:
lowercase_ : Any = F"""Input value of [number={number}] must be > 0"""
raise ValueError(_snake_case )
lowercase_ : Union[str, Any] = 1
for i in range(1 , _snake_case ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'gpt_neox_japanese'
def __init__( self : List[str] , a : List[Any]=3_2_0_0_0 , a : Union[str, Any]=2_5_6_0 , a : Optional[Any]=3_2 , a : Any=3_2 , a : str=4 , a : Optional[int]="gelu" , a : Optional[Any]=1.00 , a : Dict=1_0_0_0_0 , a : List[Any]=2_0_4_8 , a : Dict=0.02 , a : int=1e-5 , a : Optional[int]=True , a : Union[str, Any]=3_1_9_9_6 , a : List[Any]=3_1_9_9_9 , a : List[str]=0.1 , a : Dict=0.0 , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowercase_ : int = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[Any] = intermediate_multiple_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = rotary_pct
lowercase_ : Tuple = rotary_emb_base
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = use_cache
lowercase_ : Any = attention_dropout
lowercase_ : List[Any] = hidden_dropout
| 640
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( UpperCamelCase_ ):
__lowerCamelCase: Optional[Any] = ['image_processor', 'tokenizer']
__lowerCamelCase: Tuple = 'AutoImageProcessor'
__lowerCamelCase: List[str] = 'AutoTokenizer'
def __init__( self : Tuple , a : Dict , a : List[str] ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ : List[str] = self.image_processor
def __call__( self : Union[str, Any] , a : int=None , a : List[Any]=None , a : Any=None , **a : Any ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase_ : Optional[int] = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
lowercase_ : List[Any] = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
lowercase_ : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] , *a : str , **a : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , *a : Any , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 712
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = []
for u in graph:
lowercase_ : List[Any] = math.inf
lowercase_ : str = None
lowercase_ : Tuple = 0
lowercase_ : Tuple = graph[:]
while q:
lowercase_ : List[Any] = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase_ : Optional[int] = u
lowercase_ : Union[str, Any] = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for u in graph:
lowercase_ : str = math.inf
lowercase_ : int = None
lowercase_ : List[Any] = 0
lowercase_ : str = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
lowercase_ : List[Any] = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase_ : str = u
lowercase_ : Optional[int] = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __SCREAMING_SNAKE_CASE ( ):
    """No-op placeholder; upstream this docstring carried the module's doctests."""


# Run any doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 640
| 0
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow RemBERT checkpoint to a PyTorch state dict on disk.

    Fixes the original signature, which declared three parameters with the same
    name (a SyntaxError) while the body referenced a fourth, unbound name.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        rembert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to save the converted ``state_dict``.
    """
    # Initialise PyTorch model from the JSON config
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("Building PyTorch model from configuration: {}".format(str(config ) ) )
    model = RemBertModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
# CLI entry point: parse checkpoint/config/output paths and run the conversion.
# NOTE(review): obfuscation bound the parser to a throwaway name while the
# add_argument calls use `parser`, and the final call targets a function name
# (`convert_rembert_tf_checkpoint_to_pytorch`) not defined under that name here.
if __name__ == "__main__":
    UpperCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--rembert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained RemBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    UpperCamelCase__ = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 713
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
# Interactive demo: read space-separated integers, sort, and print the result.
# NOTE(review): obfuscation bound both intermediates to `UpperCamelCase__`
# while `odd_even_sort(input_list)` / `print(sorted_list)` use the real names.
if __name__ == "__main__":
    print('Enter list to be sorted')
    UpperCamelCase__ = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    UpperCamelCase__ = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
| 640
| 0
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Fix: both bindings used the throwaway name `UpperCamelCase__`, while the
# conversion functions below reference `logger` and `MAPPING`.
logger = logging.get_logger(__name__)

# fairseq parameter-name prefix -> HF SEW parameter name
# ('*' is a wildcard later replaced by the encoder layer index).
MAPPING = {
    'post_extract_proj': 'feature_projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.upsample.0': 'encoder.upsample.projection',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'layer_norm',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
def __SCREAMING_SNAKE_CASE ( hf_pointer , key , value , full_name , weight_type ):
    """Copy `value` into the HF module attribute addressed by dotted `key`.

    Fixes the original signature, which declared five parameters with the same
    name — a SyntaxError — and dropped every assignment into a throwaway local.

    Args:
        hf_pointer: root HF module to walk into.
        key: dotted attribute path (e.g. "encoder.layers.0.attention.k_proj").
        value: tensor to copy in.
        full_name: original fairseq parameter name (for logging/assert messages).
        weight_type: one of "weight", "weight_g", "weight_v", "bias", or None.
    """
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __SCREAMING_SNAKE_CASE ( fairseq_model , hf_model , is_finetuned ):
    """Copy all fairseq SEW weights into the HF model, warning about leftovers.

    Fixes the original's duplicate parameter names and the throwaway-local
    obfuscation that left `name`, `is_used`, `mapped_key`, etc. unbound.

    Args:
        fairseq_model: source fairseq model (provides state_dict()).
        hf_model: target HF SEW/SEWForCTC model.
        is_finetuned: whether the target wraps the encoder under ``.sew``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # fine-tuned checkpoints nest the feature extractor one level deeper
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # everything except the LM head lives under the "sew." prefix when fine-tuned
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # splice the encoder layer index into the wildcard slot
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f"""Unused weights: {unused_weights}""" )
def __SCREAMING_SNAKE_CASE ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Copy one fairseq conv-layer weight/bias into the HF feature extractor.

    Fixes the original's duplicate parameter names and unbound locals
    (`name`, `items`, `layer_id`, `type_id`).

    Args:
        full_name: fairseq parameter name containing "conv_layers.<id>.<type>".
        value: tensor to copy in.
        feature_extractor: HF feature extractor holding ``conv_layers``.
        unused_weights: list collecting names that could not be mapped.
        use_group_norm: whether the extractor uses group norm (affects type_id 2).
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )  # 0 = conv weight/bias, 2 = layer-norm weight/bias

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def __SCREAMING_SNAKE_CASE ( model , is_finetuned ):
    """Derive an HF SEWConfig from a fairseq model's configuration.

    Fixes the original's duplicate parameter names and the throwaway-local
    obfuscation that dropped every `config.<field> = ...` assignment.

    Args:
        model: loaded fairseq model (encoder config under ``.cfg``).
        is_finetuned: whether `model` is a fine-tuned CTC wrapper.

    Returns:
        a populated SEWConfig.
    """
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )  # fairseq stores this as a repr string
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Convert a fairseq SEW checkpoint to the HF transformers format on disk.

    Fixes the original's duplicate parameter names and the throwaway-local
    obfuscation that discarded `model`, `config`, `feature_extractor`, etc.

    Args:
        checkpoint_path: fairseq checkpoint to convert.
        pytorch_dump_folder_path: output directory for the HF model/processor.
        config_path: optional existing HF config to use instead of deriving one.
        dict_path: fairseq dictionary (only relevant for fine-tuned models).
        is_finetuned: convert as SEWForCTC (True) or bare SEWModel (False).
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )

    recursively_load_weights(model , hf_model , is_finetuned )

    hf_model.save_pretrained(pytorch_dump_folder_path )
# CLI entry point for the SEW conversion.
# NOTE(review): obfuscation bound the parser/args to `UpperCamelCase__` while
# later lines use `parser`/`args`, and the final call targets
# `convert_sew_checkpoint`, a name not defined under that name here.
if __name__ == "__main__":
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    UpperCamelCase__ = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 714
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 640
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _UpperCAmelCase ( lowercase__ ):
__lowerCamelCase: List[Any] = """donut-swin"""
__lowerCamelCase: Any = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Any , a : Dict=2_2_4 , a : str=4 , a : Optional[int]=3 , a : Dict=9_6 , a : List[str]=[2, 2, 6, 2] , a : Union[str, Any]=[3, 6, 1_2, 2_4] , a : str=7 , a : Optional[Any]=4.0 , a : Tuple=True , a : List[str]=0.0 , a : int=0.0 , a : Optional[Any]=0.1 , a : str="gelu" , a : List[str]=False , a : Optional[Any]=0.02 , a : Optional[int]=1e-5 , **a : int , ):
'''simple docstring'''
super().__init__(**__lowercase )
lowercase_ : List[Any] = image_size
lowercase_ : List[Any] = patch_size
lowercase_ : List[Any] = num_channels
lowercase_ : Optional[int] = embed_dim
lowercase_ : Optional[int] = depths
lowercase_ : Dict = len(__lowercase )
lowercase_ : int = num_heads
lowercase_ : Optional[int] = window_size
lowercase_ : Any = mlp_ratio
lowercase_ : Dict = qkv_bias
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Tuple = drop_path_rate
lowercase_ : List[Any] = hidden_act
lowercase_ : Tuple = use_absolute_embeddings
lowercase_ : int = layer_norm_eps
lowercase_ : Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ : List[str] = int(embed_dim * 2 ** (len(__lowercase ) - 1) )
| 715
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# Canonical filename for serialized scheduler configuration.
# Fix: this was bound to a throwaway name while the mixin class below
# references SCHEDULER_CONFIG_NAME.
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
    '''Ordinal identifiers (1-5) for the Karras-style flax schedulers.'''
    # NOTE(review): obfuscation collapsed all member names to one identifier;
    # upstream these are five distinct scheduler enum members — confirm.
    __lowerCamelCase: int = 1
    __lowerCamelCase: List[Any] = 2
    __lowerCamelCase: Optional[Any] = 3
    __lowerCamelCase: int = 4
    __lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
    '''Output container for flax scheduler steps; holds a single jnp array.'''
    # NOTE(review): field name lost to obfuscation — presumably `prev_sample`.
    __lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
    '''Base mixin for flax schedulers: config load/save plus compatibility listing.'''

    # filename used when (de)serializing scheduler configuration
    __lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
    __lowerCamelCase: Optional[int] = ['dtype']
    __lowerCamelCase: int = []
    __lowerCamelCase: Dict = True

    @classmethod
    def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
        '''Instantiate a scheduler (and its state, when stateful) from a saved config.'''
        # NOTE(review): duplicate `a` parameters are obfuscation damage; real
        # signature is (pretrained_model_name_or_path, subfolder, return_unused_kwargs).
        lowercase_ , lowercase_ : Any = cls.load_config(
            pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
        lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
        # stateful schedulers expose create_state()/has_state
        if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
            lowercase_ : Tuple = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
        '''Write the scheduler config to a directory (optionally pushing to the Hub).'''
        self.save_config(save_directory=a , push_to_hub=a , **a )

    @property
    def lowerCAmelCase__ ( self : Tuple ):
        '''Scheduler classes this one is config-compatible with.'''
        return self._get_compatibles()

    @classmethod
    def lowerCAmelCase__ ( cls : Dict ):
        '''Resolve compatible class names to the classes exported by the package root.'''
        lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
        lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
        lowercase_ : Optional[Any] = [
            getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
        ]
        return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
@flax.struct.dataclass
class _UpperCAmelCase :
    """Precomputed beta/alpha tables shared by the flax schedulers.

    Field names restored: the classmethod below instantiates
    ``cls(alphas=..., betas=..., alphas_cumprod=...)``, so the three fields
    must carry these names (the obfuscated original reused one name).
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def lowerCAmelCase__ ( cls , scheduler ):
        """Build the state from a scheduler's config (trained betas or a named schedule)."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )

        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def __SCREAMING_SNAKE_CASE ( state , original_samples , noise , timesteps ):
    """Return sqrt(alpha_cumprod[t]) and sqrt(1 - alpha_cumprod[t]) broadcast to
    `original_samples.shape`.

    `noise` is accepted for signature parity with the add-noise/velocity
    callers but is unused here. The original declared all four parameters
    with one duplicated name (a SyntaxError); names restored from usage.
    """
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    # NOTE(review): the broadcast helper is itself mangled to
    # `__SCREAMING_SNAKE_CASE` in this file; `broadcast_to_shape_from_left`
    # is its upstream name.
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( state , original_samples , noise , timesteps ):
    """Forward-diffuse `original_samples` with `noise` at the given `timesteps`:
    sqrt(a_t) * x0 + sqrt(1 - a_t) * noise.

    The original declared all four parameters with one duplicated name (a
    SyntaxError); names restored from usage.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def __SCREAMING_SNAKE_CASE ( state , sample , noise , timesteps ):
    """Compute the v-prediction target: sqrt(a_t) * noise - sqrt(1 - a_t) * sample.

    The original declared all four parameters with one duplicated name (a
    SyntaxError); names restored from usage.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 640
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( __snake_case , unittest.TestCase ):
    # Fast, tiny-model tests for KandinskyVaaInpaintPipeline (CPU, 64x64 images).
    # NOTE(review): the method bodies reference `A_`, which is not defined
    # anywhere in this file, and every helper property shares the single name
    # `lowerCAmelCase__`, so lookups such as `self.time_input_dim` cannot
    # resolve as written. This looks like an artifact of automated renaming —
    # verify against the upstream test module before running.
    __lowerCamelCase: List[Any] = KandinskyVaaInpaintPipeline
    __lowerCamelCase: List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    __lowerCamelCase: List[Any] = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    __lowerCamelCase: List[Any] = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    __lowerCamelCase: Optional[int] = False
    @property
    def lowerCAmelCase__ ( self : List[Any] ):
        """Hidden size of the dummy text/image embedder (32)."""
        return 3_2
    @property
    def lowerCAmelCase__ ( self : str ):
        """Time-embedding input dimension of the dummy UNet (32)."""
        return 3_2
    @property
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Cross-attention dimension; mirrors the time input dimension."""
        return self.time_input_dim
    @property
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Time-embedding output dimension (4x the input dimension)."""
        return self.time_input_dim * 4
    @property
    def lowerCAmelCase__ ( self : List[str] ):
        """Nominal number of training timesteps for the dummy scheduler (100)."""
        return 1_0_0
    @property
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Build a tiny UNet (9 input channels: latents + masked image + mask)."""
        torch.manual_seed(0 )
        lowercase_ : List[Any] = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        lowercase_ : Optional[int] = UNetaDConditionModel(**A_ )
        return model
    @property
    def lowerCAmelCase__ ( self : int ):
        """Keyword arguments for the tiny VQModel used as the movq image decoder."""
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def lowerCAmelCase__ ( self : List[Any] ):
        """Instantiate the tiny VQModel with a fixed seed."""
        torch.manual_seed(0 )
        lowercase_ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
        return model
    def lowerCAmelCase__ ( self : List[Any] ):
        """Assemble the pipeline components: dummy UNet, DDIM scheduler and movq."""
        lowercase_ : List[Any] = self.dummy_unet
        lowercase_ : Optional[Any] = self.dummy_movq
        lowercase_ : Optional[Any] = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
        lowercase_ : Dict = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def lowerCAmelCase__ ( self : Optional[int] , a : Optional[int] , a : Optional[int]=0 ):
        """Create deterministic pipeline inputs: embeds, a 64x64 init image upscaled
        to 256x256, and a 64x64 mask with one zeroed pixel."""
        lowercase_ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
        lowercase_ : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A_ )
        # create init_image
        lowercase_ : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(A_ ) ).to(A_ )
        lowercase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ : str = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
        # create mask
        lowercase_ : Dict = np.ones((6_4, 6_4) , dtype=np.floataa )
        lowercase_ : Union[str, Any] = 0
        if str(A_ ).startswith("mps" ):
            lowercase_ : Tuple = torch.manual_seed(A_ )
        else:
            lowercase_ : Optional[int] = torch.Generator(device=A_ ).manual_seed(A_ )
        lowercase_ : Optional[int] = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 6_4,
            "width": 6_4,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def lowerCAmelCase__ ( self : Any ):
        """Smoke-test a full CPU inference run and compare a 3x3 corner slice
        against a stored reference (both dict and tuple return paths)."""
        lowercase_ : int = "cpu"
        lowercase_ : Dict = self.get_dummy_components()
        lowercase_ : str = self.pipeline_class(**A_ )
        lowercase_ : Any = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        lowercase_ : int = pipe(**self.get_dummy_inputs(A_ ) )
        lowercase_ : List[Any] = output.images
        lowercase_ : List[str] = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
        lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
        lowercase_ : int = image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 6_4, 6_4, 3)
        lowercase_ : List[str] = np.array(
            [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def lowerCAmelCase__ ( self : Dict ):
        """Relax the batched-vs-single max-diff tolerance for this pipeline."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    # GPU integration test: Kandinsky 2.2 prior + inpaint decoder (fp16) against
    # a stored reference image.
    # NOTE(review): bodies reference `A_` and `np.floataa` / `torch.floataa`,
    # none of which are defined here — apparent artifacts of automated
    # renaming; verify against the upstream test module before running.
    def lowerCAmelCase__ ( self : int ):
        """Free GPU memory between tests (calls super().tearDown(), then GC + CUDA cache clear)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase__ ( self : int ):
        """Run the full prior + inpaint stack ("a hat" on the cat image with a
        one-pixel mask) and compare the mean pixel difference to the stored output."""
        lowercase_ : Optional[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
        lowercase_ : Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        lowercase_ : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
        lowercase_ : str = 0
        lowercase_ : Tuple = "a hat"
        lowercase_ : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )
        lowercase_ : Optional[Any] = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa )
        lowercase_ : int = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        lowercase_ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
        lowercase_ , lowercase_ : List[Any] = pipe_prior(
            A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        lowercase_ : Union[str, Any] = pipeline(
            image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , )
        lowercase_ : Union[str, Any] = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 716
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Greedy APX algorithm for the minimum vertex cover problem.

    Repeatedly picks the vertex with the highest remaining degree, adds it to
    the cover, removes its incident edges, and stops when no edges remain.

    The original pushed entries onto the *graph dict* instead of a queue,
    ranked every vertex by ``len(graph)`` instead of its degree, and popped
    from the graph as well — all NameError/TypeError territory. It also pushed
    the caller's adjacency lists directly, so edge removal mutated the input
    graph; the lists are copied here to keep the input intact.

    >>> __SCREAMING_SNAKE_CASE({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]})
    {0, 1, 2, 4}
    """
    # Min-heap of [-degree, (vertex, adjacency)]; heapq is a min-heap, so the
    # negated degree puts the highest-degree vertex on top.
    queue: list[list] = []
    for key, value in _UpperCamelCase.items():
        # O(log(n)); copy the adjacency list so the caller's graph is untouched.
        heapq.heappush(queue , [-1 * len(value ), (key, list(value ))] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank).
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank and add it to chosen_vertices.
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax.
        for elem in queue:
            # If the vertex has no adjacent nodes left, skip it.
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem, drop it from elem's adjacency
            # list and update elem's rank.
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # Re-establish the heap invariant after the in-place rank updates.
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo run. The original called `greedy_min_vertex_cover(graph)`, but in
    # this file the function is named `__SCREAMING_SNAKE_CASE` and the demo
    # graph was bound to `UpperCamelCase__` — both were NameErrors.
    UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{__SCREAMING_SNAKE_CASE(UpperCamelCase__)}""")
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( nums ):
    """Return True if the side lengths in `nums` can form a polygon.

    A set of sides forms a polygon iff the longest side is strictly shorter
    than the sum of all the others (generalized triangle inequality). The
    input list is not modified.

    Raises:
        ValueError: if fewer than three sides are given, or any side is <= 0.

    The original's body read an undefined name `nums` while the parameter was
    mangled (restored here), and its `len(nums) < 2` guard let digons (two
    sides) through even though the error message claims they are rejected —
    tightened to require at least three sides.
    """
    if len(nums ) < 3:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in nums ):
        raise ValueError("All values must be greater than 0" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 717
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a deprecation warning for this legacy import location (removal slated
# for version 0.22.0); stacklevel=3 points the warning at the importing caller.
deprecate(
    'pipelines_utils',
    '0.22.0',
    'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
    standard_warn=False,
    stacklevel=3,
)
| 640
| 0
|
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCamelCase__ = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( a=2 , b=3 , batch_size=16 , n_train_batches=10 , n_valid_batches=2 ):
    """Create train/valid DataLoaders over a noisy linear relation y = a*x + b.

    The original declared all five parameters with one duplicated name (a
    SyntaxError) and read undefined names in the body; parameter names are
    restored from usage (`a * x + b`, batch sizing).
    """
    def get_dataset(n_batches ):
        # One column of inputs and the matching noisy targets.
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    # NOTE(review): shuffle flags were undefined names in the original; True for
    # train / False for valid matches the upstream accelerate test — confirm.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def __SCREAMING_SNAKE_CASE ( num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    """Run a quick MSE training loop; returns one random.random() per batch so the
    RNG trajectory can be compared across checkpoint save/load cycles.

    The original declared all six parameters with one duplicated name (a
    SyntaxError) and never unpacked the batch; names restored from usage.
    """
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() )  # Introduce some randomness
        # NOTE(review): stepping once per epoch matches the upstream test — confirm.
        if scheduler is not None:
            scheduler.step()
    return rands
class _UpperCAmelCase ( nn.Module ):
def __init__( self : Union[str, Any] ):
'''simple docstring'''
super().__init__()
lowercase_ : Any = nn.Parameter(torch.randn(1 ) )
lowercase_ : Tuple = nn.Parameter(torch.randn(1 ) )
def lowerCAmelCase__ ( self : str , a : Optional[int] ):
'''simple docstring'''
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
    # Unit tests for Accelerator checkpoint save/load behaviour.
    # NOTE(review): bodies reference `lowerCamelCase__` (never defined in this
    # file) and rely on locals that were all renamed to `lowercase_` while
    # being read under their original names — artifacts of automated renaming.
    # Verify against the upstream accelerate test module before running.
    def lowerCAmelCase__ ( self : Any ):
        """With automatic naming and total_limit=1, repeated save_state keeps one dir."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            lowercase_ : List[Any] = DummyModel()
            lowercase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase_ : Union[str, Any] = dummy_dataloaders()
            lowercase_ : Tuple = ProjectConfiguration(total_limit=1 , project_dir=lowerCamelCase__ , automatic_checkpoint_naming=lowerCamelCase__ )
            # Train baseline
            lowercase_ : List[str] = Accelerator(project_config=lowerCamelCase__ )
            lowercase_ : Optional[Any] = accelerator.prepare(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def lowerCAmelCase__ ( self : Any ):
        """Explicit-folder save/load round-trips model params, optimizer state and
        the RNG-dependent training trajectory."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            lowercase_ : List[Any] = DummyModel()
            lowercase_ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase_ : Union[str, Any] = dummy_dataloaders()
            # Train baseline
            lowercase_ : List[str] = Accelerator()
            lowercase_ : Optional[Any] = accelerator.prepare(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # Save initial
            lowercase_ : Any = os.path.join(lowerCamelCase__ , "initial" )
            accelerator.save_state(lowerCamelCase__ )
            (lowercase_) : Union[str, Any] = model.a.item(), model.b.item()
            lowercase_ : str = optimizer.state_dict()
            lowercase_ : Tuple = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            (lowercase_) : Optional[int] = model.a.item(), model.b.item()
            lowercase_ : List[str] = optimizer.state_dict()
            # Train partially
            set_seed(4_2 )
            lowercase_ : Optional[Any] = DummyModel()
            lowercase_ : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase_ : List[Any] = dummy_dataloaders()
            lowercase_ : Union[str, Any] = Accelerator()
            lowercase_ : Optional[Any] = accelerator.prepare(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            accelerator.load_state(lowerCamelCase__ )
            (lowercase_) : Tuple = model.a.item(), model.b.item()
            lowercase_ : Union[str, Any] = optimizer.state_dict()
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            lowercase_ : Optional[Any] = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # Save everything
            lowercase_ : Optional[Any] = os.path.join(lowerCamelCase__ , "checkpoint" )
            accelerator.save_state(lowerCamelCase__ )
            # Load everything back in and make sure all states work
            accelerator.load_state(lowerCamelCase__ )
            test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            (lowercase_) : Union[str, Any] = model.a.item(), model.b.item()
            lowercase_ : Dict = optimizer.state_dict()
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
    def lowerCAmelCase__ ( self : Tuple ):
        """Same round-trip as above, using automatic checkpoint naming
        (checkpoints/checkpoint_N directories)."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            lowercase_ : Optional[Any] = DummyModel()
            lowercase_ : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase_ : Any = dummy_dataloaders()
            lowercase_ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
            # Train baseline
            lowercase_ : str = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
            lowercase_ : Optional[Any] = accelerator.prepare(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # Save initial
            accelerator.save_state()
            (lowercase_) : Optional[Any] = model.a.item(), model.b.item()
            lowercase_ : Union[str, Any] = optimizer.state_dict()
            lowercase_ : Optional[Any] = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            (lowercase_) : Dict = model.a.item(), model.b.item()
            lowercase_ : Optional[Any] = optimizer.state_dict()
            # Train partially
            set_seed(4_2 )
            lowercase_ : Optional[Any] = DummyModel()
            lowercase_ : str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase_ : Dict = dummy_dataloaders()
            lowercase_ : Optional[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase__ )
            lowercase_ : Optional[Any] = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
            lowercase_ : Union[str, Any] = accelerator.prepare(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) )
            (lowercase_) : int = model.a.item(), model.b.item()
            lowercase_ : int = optimizer.state_dict()
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            lowercase_ : str = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_1" ) )
            test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            (lowercase_) : Dict = model.a.item(), model.b.item()
            lowercase_ : Dict = optimizer.state_dict()
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        """register_for_checkpointing rejects objects lacking state_dict/load_state_dict
        (the two raw tensors at indices 0 and 1; model and optimizer are fine)."""
        lowercase_ : int = torch.tensor([1, 2, 3] )
        lowercase_ : Any = torch.tensor([2, 3, 4] )
        lowercase_ : Optional[int] = DummyModel()
        lowercase_ : List[Any] = torch.optim.Adam(net.parameters() )
        lowercase_ : Union[str, Any] = Accelerator()
        with self.assertRaises(lowerCamelCase__ ) as ve:
            accelerator.register_for_checkpointing(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        lowercase_ : Tuple = str(ve.exception )
        self.assertTrue("Item at index 0" in message )
        self.assertTrue("Item at index 1" in message )
        self.assertFalse("Item at index 2" in message )
        self.assertFalse("Item at index 3" in message )
    def lowerCAmelCase__ ( self : List[str] ):
        """LR-scheduler state is captured by save_state and restored by load_state."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            lowercase_ : Optional[int] = DummyModel()
            lowercase_ : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            lowercase_ : int = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.99 )
            lowercase_ : Any = dummy_dataloaders()
            lowercase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
            # Train baseline
            lowercase_ : Optional[Any] = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
            lowercase_ : int = accelerator.prepare(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # Save initial
            accelerator.save_state()
            lowercase_ : Union[str, Any] = scheduler.state_dict()
            train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) )
            self.assertEqual(lowerCamelCase__ , scheduler.state_dict() )
    def lowerCAmelCase__ ( self : List[Any] ):
        """Checkpoint rotation: with total_limit=2 and 11 saves, checkpoint_0 is
        pruned while checkpoint_9 and checkpoint_10 survive."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(4_2 )
            lowercase_ : Dict = DummyModel()
            lowercase_ : int = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 )
            # Train baseline
            lowercase_ : Optional[int] = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
            lowercase_ : Union[str, Any] = accelerator.prepare(lowerCamelCase__ )
            # Save 3 states:
            for _ in range(1_1 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) ) )
            self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_9" ) ) )
            self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_10" ) ) )
    @require_cuda
    def lowerCAmelCase__ ( self : List[Any] ):
        """Re-launch this file with torchrun across all visible GPUs to exercise
        the distributed checkpointing path in the __main__ block below."""
        lowercase_ : str = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
    # End-to-end checkpointing sanity check, runnable standalone or via the
    # torchrun launch in the @require_cuda test above. Verifies that optimizer
    # state honours map_location ('cpu' / 'on_device') and that an invalid
    # map_location raises TypeError.
    # NOTE(review): every assignment target below was renamed to
    # `UpperCamelCase__` while later lines read the original names (savedir,
    # model, optimizer, ...) — a renaming artifact; verify against the
    # upstream accelerate script before running.
    UpperCamelCase__ = '/tmp/accelerate/state_checkpointing'
    UpperCamelCase__ = DummyModel()
    UpperCamelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    UpperCamelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    UpperCamelCase__, UpperCamelCase__ = dummy_dataloaders()
    UpperCamelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    UpperCamelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    # NOTE(review): model and optimizer are prepared a second time here —
    # presumably intentional in the original; confirm.
    UpperCamelCase__, UpperCamelCase__ = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        UpperCamelCase__ = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    UpperCamelCase__ = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        UpperCamelCase__ = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        UpperCamelCase__ = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 718
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels for each supported GLUE task (sts-b is a regression
# task, hence a single output). The converter function below reads
# GLUE_TASKS_NUM_LABELS, which the renaming had replaced with `UpperCamelCase__`
# only — define the real name and keep the mangled one as a compatible alias.
GLUE_TASKS_NUM_LABELS = {
    'cola': 2,
    'mnli': 3,
    'mrpc': 2,
    'sst-2': 2,
    'sts-b': 1,
    'qqp': 2,
    'qnli': 2,
    'rte': 2,
    'wnli': 2,
}
UpperCamelCase__ = GLUE_TASKS_NUM_LABELS
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model + config on disk.

    Chooses the head from `finetuning_task`: a GLUE task yields a sequence
    classifier sized by GLUE_TASKS_NUM_LABELS, a task containing "squad" yields
    a QA model, anything else the plain LM head. Writes WEIGHTS_NAME and
    CONFIG_NAME under `pytorch_dump_folder_path`.

    The original declared all four parameters with one duplicated name (a
    SyntaxError) and bound `config.finetuning_task` / `config.num_labels` /
    the save paths to throwaway locals; names restored from usage and from the
    argparse call site below.
    """
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI entry point for the TF -> PyTorch XLNet checkpoint conversion.
    # The original bound the parser and parsed args to `UpperCamelCase__` while
    # reading `parser` / `args`, and called the converter by its upstream name
    # (`convert_xlnet_checkpoint_to_pytorch`), which does not exist in this
    # file — all NameErrors, fixed here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--xlnet_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained XLNet model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--finetuning_task',
        default=None,
        type=str,
        help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
    )
    args = parser.parse_args()
    print(args)
    __SCREAMING_SNAKE_CASE(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 640
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCAmelCase ( __a ):
__lowerCamelCase: Tuple = ["pixel_values"]
def __init__( self : Any , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Optional[Dict[str, int]] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_5_5 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ):
    """Configure the CLIP-style image processor (resize -> center-crop ->
    rescale -> normalize -> RGB conversion), with OpenAI CLIP mean/std defaults.

    The original declared every parameter with the same name (`a`) — a
    SyntaxError — assigned the normalized sizes to a local instead of `self`,
    and passed undefined names to `get_size_dict`; attribute names are
    restored from the reads in the `preprocess` method below.
    """
    super().__init__(**kwargs )
    size = size if size is not None else {"shortest_edge": 2_2_4}
    # NOTE(review): default_to_square flags restored per the upstream CLIP
    # processor (shortest-edge sizes stay non-square; crop sizes square) — confirm.
    size = get_size_dict(size , default_to_square=False )
    crop_size = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
    crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
    self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
    self.do_convert_rgb = do_convert_rgb
def lowerCAmelCase__ ( self : Union[str, Any] , image : np.ndarray , size : Dict[str, int] , resample = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
    """Resize `image` so its shortest edge equals `size["shortest_edge"]`,
    preserving aspect ratio.

    Raises ValueError if the size dict lacks `shortest_edge`. Parameter names
    restored — the original used one duplicated name (`a`, a SyntaxError) and
    passed undefined names to the helpers.
    """
    size = get_size_dict(size , default_to_square=False )
    if "shortest_edge" not in size:
        raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
    output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
    return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def lowerCAmelCase__ ( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
    """Center-crop `image` to (size["height"], size["width"]).

    Raises ValueError if the size dict lacks either key. Parameter names
    restored — the original used one duplicated name (`a`, a SyntaxError).
    """
    size = get_size_dict(size )
    if "height" not in size or "width" not in size:
        raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
    return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
def lowerCAmelCase__ ( self : int , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
    """Rescale pixel values by `scale` (e.g. 1/255). Parameter names restored —
    the original used one duplicated name (`a`, a SyntaxError)."""
    return rescale(image , scale=scale , data_format=data_format , **kwargs )
def lowerCAmelCase__ ( self : Optional[Any] , a : str , a : Union[str, Any] , a : int , a : Optional[Any] = None , **a : Any , ):
'''simple docstring'''
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase__ ( self : str , a : List[Any] , a : str = None , a : str = None , a : Union[str, Any] = None , a : Union[str, Any] = None , a : Union[str, Any] = None , a : Any = None , a : int = None , a : List[Any] = None , a : Tuple = None , a : Any = None , a : Optional[Any] = None , a : List[Any] = None , a : Optional[int] = ChannelDimension.FIRST , **a : int , ):
'''simple docstring'''
lowercase_ : Dict = do_resize if do_resize is not None else self.do_resize
lowercase_ : str = size if size is not None else self.size
lowercase_ : str = get_size_dict(lowerCAmelCase_ , param_name="size" , default_to_square=lowerCAmelCase_ )
lowercase_ : Union[str, Any] = resample if resample is not None else self.resample
lowercase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Any = crop_size if crop_size is not None else self.crop_size
lowercase_ : List[Any] = get_size_dict(lowerCAmelCase_ , param_name="crop_size" , default_to_square=lowerCAmelCase_ )
lowercase_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Dict = image_mean if image_mean is not None else self.image_mean
lowercase_ : Optional[int] = image_std if image_std is not None else self.image_std
lowercase_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ : Optional[Any] = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ : Union[str, Any] = [convert_to_rgb(lowerCAmelCase_ ) for image in images]
# All transformations expect numpy arrays.
lowercase_ : Union[str, Any] = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
lowercase_ : Any = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
lowercase_ : Tuple = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
lowercase_ : List[str] = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
lowercase_ : List[str] = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
lowercase_ : int = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
lowercase_ : Any = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 719
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( number ):
    """
    Return True if ``number`` is a power of two, using the classic
    ``n & (n - 1) == 0`` bit trick.

    Args:
        number (int): Non-negative integer to test.

    Returns:
        bool: True when ``number`` has at most one bit set.

    Raises:
        ValueError: If ``number`` is negative.
    """
    # NOTE(review): the original parameter was named `_UpperCamelCase` while the
    # body read `number` — a NameError at call time; the parameter is renamed to
    # match the body. Behavior is otherwise unchanged: as before, 0 satisfies
    # `0 & -1 == 0` and returns True.
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    """
    Load PyTorch checkpoint weights and convert them into a Flax state dict.

    Args:
        flax_model: The target Flax model whose parameter structure guides the conversion.
        pytorch_checkpoint_path: Path to a single ``.bin``/``.pt`` checkpoint, or the
            list of shard files when ``is_sharded`` is True.
        is_sharded (bool): Whether the checkpoint is split into multiple shard files.
        allow_missing_keys (bool): Accepted for API compatibility; not used here.

    Returns:
        The converted (nested) Flax state dict.
    """
    # NOTE(review): the original signature repeated `_UpperCamelCase` four times
    # (a SyntaxError) and the body referenced the module-level logger in place of
    # its arguments; names are reconstructed from the surrounding conversion helpers.
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(f"""Loading PyTorch weights from {pt_path}""" )
        # Load on CPU so conversion never requires a GPU.
        pt_state_dict = torch.load(pt_path , map_location="cpu" )
        logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def __SCREAMING_SNAKE_CASE ( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ):
    """
    Rename a PyTorch weight key (as a tuple of path components) to the Flax
    naming convention and reshape/transpose the tensor where the layouts differ.

    Args:
        pt_tuple_key (tuple): PyTorch parameter name split on ``"."``.
        pt_tensor: The parameter value (numpy-compatible array).
        random_flax_state_dict (dict): Flattened params of a freshly-initialized
            Flax model, used to disambiguate renames.
        model_prefix (str): The Flax model's base-model prefix.

    Returns:
        tuple: ``(renamed_key_tuple, possibly_reshaped_tensor)``.
    """
    # NOTE(review): the original signature repeated `_UpperCamelCase` (a
    # SyntaxError); parameter names are reconstructed from the body's usage.
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        # True if `key`, either bare or under the model prefix, exists in the
        # randomly-initialized Flax state dict.
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: PyTorch (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    # No rename rule matched: keep the key as-is.
    return pt_tuple_key, pt_tensor
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
    """
    Convert a flat PyTorch state dict into a nested Flax params dict.

    NOTE(review): this chunk's identifiers look machine-mangled — the parameter
    list repeats `_UpperCamelCase` (not valid Python), `UpperCamelCase__` is the
    module-level logger, and `lowercase_` is rebound for every value. The names
    used in expressions (pt_state_dict, flax_model, flax_key, flax_tensor, ...)
    presumably reflect the original variable names — verify against the
    upstream transformers source before relying on this code.
    """
    # Move every tensor to numpy so Flax/jnp can consume it.
    lowercase_ : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
    lowercase_ : List[str] = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        lowercase_ : int = flax_model.params["""params"""]
    else:
        lowercase_ : Union[str, Any] = flax_model.params
    lowercase_ : List[str] = flatten_dict(UpperCamelCase__ )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        lowercase_ : Optional[Any] = flatten_dict(flax_model.params["batch_stats"] )
        random_flax_state_dict.update(UpperCamelCase__ )
    lowercase_ : int = {}
    # Detect head-model -> base-model and base-model -> head-model loading.
    lowercase_ : List[str] = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    lowercase_ : List[str] = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        lowercase_ : Dict = tuple(pt_key.split("." ) )
        # remove base model prefix if necessary
        lowercase_ : str = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowercase_ : int = pt_tuple_key[1:]
        # Correctly rename weight parameters
        lowercase_ : str = rename_key_and_reshape_tensor(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # add model prefix if necessary
        lowercase_ : str = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            lowercase_ : str = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            # Shapes must match the randomly-initialized Flax parameter exactly.
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                lowercase_ : List[str] = jnp.asarray(UpperCamelCase__ )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
                continue
            # also add unexpected weight so that warning is thrown
            lowercase_ : Union[str, Any] = jnp.asarray(UpperCamelCase__ )
        else:
            # also add unexpected weight so that warning is thrown
            lowercase_ : List[str] = jnp.asarray(UpperCamelCase__ )
    # Re-nest the flat {tuple_key: tensor} mapping into a Flax params tree.
    return unflatten_dict(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
    """
    Convert a *sharded* PyTorch checkpoint (a list of shard files) into a
    nested Flax params dict, accumulating all shards into one state dict.

    NOTE(review): identifiers appear machine-mangled exactly as in the
    unsharded variant above — repeated `_UpperCamelCase` parameters,
    `UpperCamelCase__` shadowing the module logger, and `lowercase_` rebound per
    value. Verify names against the upstream transformers source.
    """
    import torch
    # Load the index
    lowercase_ : List[Any] = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        lowercase_ : Any = torch.load(UpperCamelCase__ )
        lowercase_ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
        lowercase_ : List[Any] = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            lowercase_ : str = flax_model.params["""params"""]
            lowercase_ : List[str] = flatten_dict(UpperCamelCase__ )
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
        else:
            lowercase_ : Dict = flax_model.params
            lowercase_ : List[Any] = flatten_dict(UpperCamelCase__ )
        # Detect head-model -> base-model and base-model -> head-model loading.
        lowercase_ : Optional[Any] = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        lowercase_ : List[Any] = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            lowercase_ : Tuple = tuple(pt_key.split("." ) )
            # remove base model prefix if necessary
            lowercase_ : Optional[int] = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                lowercase_ : Optional[Any] = pt_tuple_key[1:]
            # Correctly rename weight parameters
            lowercase_ : int = rename_key_and_reshape_tensor(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # add model prefix if necessary
            lowercase_ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                lowercase_ : List[Any] = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                # Shapes must match the randomly-initialized Flax parameter exactly.
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    lowercase_ : Optional[int] = jnp.asarray(UpperCamelCase__ )
                    continue
                if "var" in flax_key[-1]:
                    lowercase_ : Any = jnp.asarray(UpperCamelCase__ )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
                    continue
                # also add unexpected weight so that warning is thrown
                lowercase_ : Tuple = jnp.asarray(UpperCamelCase__ )
            else:
                # also add unexpected weight so that warning is thrown
                lowercase_ : List[Any] = jnp.asarray(UpperCamelCase__ )
    # Re-nest the accumulated flat mapping into a Flax params tree.
    return unflatten_dict(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( model , flax_checkpoint_path ):
    """
    Load a serialized Flax checkpoint (msgpack format) into a PyTorch model.

    Args:
        model: The target PyTorch model; its class name is used to locate the
            matching ``Flax<ModelName>`` class in ``transformers``.
        flax_checkpoint_path: Path to the serialized Flax checkpoint file.

    Returns:
        The PyTorch model with the Flax weights loaded.

    Raises:
        EnvironmentError: If the checkpoint cannot be deserialized as Flax state.
    """
    # NOTE(review): the original signature repeated `_UpperCamelCase` (a
    # SyntaxError) and the body referenced the module logger in its place;
    # parameter names are reconstructed from the body's log messages.
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
    # import correct flax class
    flax_cls = getattr(transformers , "Flax" + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path , "rb" ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
    """
    Load a (nested) Flax state dict into a PyTorch model, renaming keys and
    transposing kernels to match PyTorch conventions, then report unexpected
    and missing keys.

    NOTE(review): identifiers appear machine-mangled — the parameter list
    repeats `_UpperCamelCase` (not valid Python), `UpperCamelCase__` shadows the
    module logger, `jnp.bfloataa`/`np.floataa` look like mangled
    `jnp.bfloat16`/`np.float32`, and the lambda below takes `_UpperCamelCase`
    but reads `x`. Verify against the upstream transformers source.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    # check if we have bf16 weights
    # NOTE(review): lambda parameter/body mismatch (`_UpperCamelCase` vs `x`) — presumably `lambda x: x.dtype == jnp.bfloat16`.
    lowercase_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , UpperCamelCase__ ) ).values()
    if any(UpperCamelCase__ ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        lowercase_ : Optional[Any] = jax.tree_util.tree_map(
            lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCamelCase__ )
    lowercase_ : List[Any] = flatten_dict(UpperCamelCase__ )
    lowercase_ : Optional[Any] = pt_model.state_dict()
    # Detect head-model -> base-model and base-model -> head-model loading.
    lowercase_ : int = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    lowercase_ : List[str] = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    lowercase_ : Union[str, Any] = []
    lowercase_ : List[str] = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowercase_ : Dict = flax_key_tuple[0] == pt_model.base_model_prefix
        lowercase_ : int = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowercase_ : Optional[int] = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            lowercase_ : List[str] = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(UpperCamelCase__ ) not in pt_model_dict:
            # conv layer: Flax HWIO -> PyTorch OIHW
            lowercase_ : Tuple = flax_key_tuple[:-1] + ("""weight""",)
            lowercase_ : Dict = jnp.transpose(UpperCamelCase__ , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCamelCase__ ) not in pt_model_dict:
            # linear layer: Flax (in, out) -> PyTorch (out, in)
            lowercase_ : str = flax_key_tuple[:-1] + ("""weight""",)
            lowercase_ : str = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            lowercase_ : Any = flax_key_tuple[:-1] + ("""weight""",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            lowercase_ : Tuple = flax_key_tuple[:-1] + ("""running_mean""",)
        elif "var" in flax_key_tuple[-1]:
            lowercase_ : Tuple = flax_key_tuple[:-1] + ("""running_var""",)
        if "batch_stats" in flax_state:
            lowercase_ : int = """.""".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            lowercase_ : Optional[int] = """.""".join(UpperCamelCase__ )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        lowercase_ : Tuple = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            lowercase_ : Any = key.split("." )
            lowercase_ : Any = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                lowercase_ : Optional[Any] = key_components[-2] + """_g"""
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                lowercase_ : int = key_components[-2] + """_v"""
            if name is not None:
                lowercase_ : str = key_components[:-3] + [name]
                lowercase_ : int = """.""".join(UpperCamelCase__ )
                lowercase_ : Any = key
        if flax_key in special_pt_names:
            lowercase_ : Optional[int] = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            # Shape must match the PyTorch parameter exactly before copying.
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                lowercase_ : List[Any] = np.asarray(UpperCamelCase__ ) if not isinstance(UpperCamelCase__ , np.ndarray ) else flax_tensor
                lowercase_ : Tuple = torch.from_numpy(UpperCamelCase__ )
                # remove from missing keys
                missing_keys.remove(UpperCamelCase__ )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(UpperCamelCase__ )
    pt_model.load_state_dict(UpperCamelCase__ )
    # re-transform missing_keys to list
    lowercase_ : Union[str, Any] = list(UpperCamelCase__ )
    if len(UpperCamelCase__ ) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)." )
    else:
        logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
    if len(UpperCamelCase__ ) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            " use it for predictions and inference." )
    else:
        logger.warning(
            f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
    return pt_model
| 720
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCamelCase__ = 50003
UpperCamelCase__ = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
    """
    Unit tests for `PLBartTokenizer` tokenization, id conversion and decoding
    over a SentencePiece test fixture, for both "base" and "multi" language-code sets.

    NOTE(review): identifiers in this chunk look machine-mangled — the three
    class attributes below share one name, `a` stands in for the fixture path /
    boolean flags, and results assigned to `lowercase_` are read back through
    other names (tokenizer, tokens, end, ...). Verify against the upstream test file.
    """
    # Presumably: tokenizer_class / rust_tokenizer_class / test_rust_tokenizer — TODO confirm.
    __lowerCamelCase: Optional[int] = PLBartTokenizer
    __lowerCamelCase: Any = None
    __lowerCamelCase: Dict = False
    def lowerCAmelCase__ ( self : int ):
        """Set up: build a base-vocab PLBart tokenizer from the fixture and persist it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        """Full tokenize/convert/decode round-trip for the "base" language-code set."""
        lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
        # Out-of-vocab pieces ("9", "é") come back as <unk> after the id round-trip.
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        lowercase_ : Dict = tokenizer.vocab_size
        # The last 4 ids of the base vocab are the language codes + mask token.
        lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
        self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
        lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : str = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
    def lowerCAmelCase__ ( self : int ):
        """Same round-trip coverage for the "multi" language-code set (7 extra codes)."""
        lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        lowercase_ : Dict = tokenizer.vocab_size
        # The last 7 ids of the multi vocab are the seven language codes.
        lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
        self.assertListEqual(
            a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
        lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : List[Any] = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    """
    Integration tests for the pretrained `uclanlp/plbart-python-en_XX` tokenizer:
    language-code ids, seq2seq batch encoding, truncation, and translation inputs.

    NOTE(review): identifiers look machine-mangled — the four class attributes
    below share one name (presumably checkpoint_name / src_text / tgt_text /
    expected_src_tokens), and `a` stands in for several literal arguments.
    Verify against the upstream test file.
    """
    __lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
    __lowerCamelCase: Tuple = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    __lowerCamelCase: List[str] = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    __lowerCamelCase: List[str] = [
        134,
        5452,
        3_3460,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        988,
        20,
        3_3456,
        19,
        3_3456,
        771,
        39,
        4258,
        889,
        3318,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        2471,
        2,
        PYTHON_CODE,
    ]
    @classmethod
    def lowerCAmelCase__ ( cls : str ):
        """Class-level setup: load the pretrained tokenizer once for all tests."""
        lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        # Presumably cls.pad_token_id = 1 in the original — TODO confirm.
        lowercase_ : List[str] = 1
        return cls
    def lowerCAmelCase__ ( self : int ):
        """Language-code tokens map to the expected fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
    def lowerCAmelCase__ ( self : int ):
        """Batch encoding of the source text yields the expected token ids."""
        lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , a )
    def lowerCAmelCase__ ( self : Tuple ):
        """Decoding with skip_special_tokens drops the language code and EOS."""
        self.assertIn(a , self.tokenizer.all_special_ids )
        lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
        lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
        self.assertEqual(a , a )
        self.assertNotIn(self.tokenizer.eos_token , a )
    def lowerCAmelCase__ ( self : Dict ):
        """Truncation keeps EOS + language code as the final two ids."""
        lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
        self.assertIsInstance(src_text[0] , a )
        lowercase_ : Tuple = 1_0
        lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , a )
        self.assertEqual(len(a ) , a )
    def lowerCAmelCase__ ( self : Dict ):
        """Mask and language-code tokens convert to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
    def lowerCAmelCase__ ( self : str ):
        """Special-token tables survive a save/reload round-trip."""
        lowercase_ : Optional[int] = tempfile.mkdtemp()
        lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(a )
        lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
    @require_torch
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Seq2seq batch layout matches the fairseq reference (code/language markers)."""
        lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
        lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , a )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        """Full seq2seq encoding: shapes, EOS placement, and prefix/suffix reset."""
        lowercase_ : Union[str, Any] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(a , a )
        self.assertEqual((2, 2_6) , batch.input_ids.shape )
        self.assertEqual((2, 2_6) , batch.attention_mask.shape )
        lowercase_ : Dict = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , a )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def lowerCAmelCase__ ( self : int ):
        """Source and target sides truncate to their own max_length independently."""
        lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
        lowercase_ : List[str] = self.tokenizer(
            text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
        lowercase_ : Dict = targets["input_ids"]
        lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        """Translation inputs carry the target language as forced_bos_token_id."""
        lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(a ) , {
                # A, test, EOS, en_XX
                "input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 5_0_0_0_1,
            } , )
| 640
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCamelCase__ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase ):
    """
    Configuration holder and input builder for Pix2Struct image-processing tests.

    The class name and the two method names are restored from the call sites in
    the sibling test class (`PixaStructImageProcessingTester(self)`,
    `prepare_image_processor_dict()`, `prepare_dummy_image()`); the original
    definition repeated the parameter name `a` (a SyntaxError) and assigned
    every value to a throwaway local instead of instance attributes that the
    sibling class reads (e.g. `self.image_processor_tester.patch_size`).
    """
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ):
        # Default target size / patch size mirror the originals' fallbacks.
        self.size = size if size is not None else {"height": 2_0, "width": 2_0}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # Sequence-length budgets exercised by the tests.
        self.max_patches = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
        self.patch_size = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ):
        """Download and return a fixed reference image as an RGB PIL image (network access)."""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
    '''Tests for the 3-channel Pix2Struct image processor.

    NOTE(review): every method below is named `lowerCAmelCase__`, so later
    definitions overwrite earlier ones, and many bodies read the name
    `UpperCAmelCase__`, which is not defined anywhere in this module, while
    locals are all bound to `lowercase_` — compare with the original
    (un-obfuscated) test file before relying on this code.
    '''
    # Image-processor class under test (None when vision deps are unavailable).
    __lowerCamelCase: Any = PixaStructImageProcessor if is_vision_available() else None
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Set up the shared tester helper for the class.'''
        lowercase_ : Optional[Any] = PixaStructImageProcessingTester(self )
    @property
    def lowerCAmelCase__ ( self : Dict ):
        '''Kwargs dict used to instantiate the image processor.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Check the processor exposes its configuration attributes.'''
        lowercase_ : str = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize" ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , "do_convert_rgb" ) )
    def lowerCAmelCase__ ( self : Tuple ):
        '''Integration check: mean of flattened patches on a fixed image.'''
        lowercase_ : List[Any] = self.image_processor_tester.prepare_dummy_image()
        lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        lowercase_ : Tuple = 2_0_4_8
        lowercase_ : Optional[int] = image_processor(UpperCAmelCase__ , return_tensors="pt" , max_patches=UpperCAmelCase__ )
        # Expected mean is a regression value for the fixed dummy image.
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Shape checks for PIL inputs, batched and unbatched.'''
        lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        # hidden dim = patch_h * patch_w * channels + 2 (row/col index features)
        lowercase_ : Dict = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase_ : List[Any] = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            lowercase_ : int = image_processor(
                UpperCAmelCase__ , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def lowerCAmelCase__ ( self : List[Any] ):
        '''VQA mode: calling without `header_text` must raise; with it, shapes hold.'''
        lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        lowercase_ : List[Any] = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        lowercase_ : int = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(UpperCAmelCase__ ):
                lowercase_ : List[Any] = image_processor(
                    image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            lowercase_ : List[Any] = "Hello"
            lowercase_ : Dict = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            lowercase_ : Dict = image_processor(
                UpperCAmelCase__ , return_tensors="pt" , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def lowerCAmelCase__ ( self : int ):
        '''Shape checks for numpy-array inputs, batched and unbatched.'''
        lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        lowercase_ : int = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase_ : List[str] = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            lowercase_ : Tuple = image_processor(
                UpperCAmelCase__ , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Shape checks for torch-tensor inputs, batched and unbatched.'''
        lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        # Test not batched input
        lowercase_ : List[Any] = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase_ : Optional[Any] = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            lowercase_ : List[Any] = image_processor(
                UpperCAmelCase__ , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
    '''Tests for the Pix2Struct image processor with 4-channel (RGBA) inputs.

    NOTE(review): methods share the name `lowerCAmelCase__` and several bodies
    read `UpperCAmelCase__`, which is undefined in this module — see the
    original test file for the intended names.
    '''
    # Image-processor class under test (None when vision deps are unavailable).
    __lowerCamelCase: List[Any] = PixaStructImageProcessor if is_vision_available() else None
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Set up a 4-channel tester; the expected encoded channels stay 3.'''
        lowercase_ : str = PixaStructImageProcessingTester(self , num_channels=4 )
        lowercase_ : str = 3
    @property
    def lowerCAmelCase__ ( self : int ):
        '''Kwargs dict used to instantiate the image processor.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCAmelCase__ ( self : List[str] ):
        '''Check the processor exposes its configuration attributes.'''
        lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize" ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , "do_convert_rgb" ) )
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Shape checks for RGBA PIL inputs; alpha is dropped (channels - 1).'''
        lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        # hidden dim uses (num_channels - 1): the alpha channel is discarded.
        lowercase_ : List[Any] = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase_ : Any = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            lowercase_ : str = image_processor(
                UpperCAmelCase__ , return_tensors="pt" , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 721
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the encodec sub-package: maps submodule name to
# the public names it exports. Populated conditionally below depending on
# whether torch is installed.
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

# Only expose the modeling objects when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: the original assigned this list to a stray module-level variable
    # instead of merging it into the import structure.
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # Fix: the lazy module must be installed into sys.modules (the original
    # bound it to a throwaway name, so lazy importing never took effect), and
    # the structure dict must be the one defined above.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 640
| 0
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
# Module-level logger for this TFRecord-preparation script.
UpperCamelCase__ = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( ):
    """Parse the command-line arguments for TFRecord shard preparation.

    Returns:
        argparse.Namespace: parsed arguments (dataset_name, dataset_config,
        tokenizer_name_or_path, shard_size, split, limit, max_length,
        output_dir).

    NOTE: the original passed the undefined name `_UpperCamelCase` as every
    `type=` callable and returned an unbound `args`; proper `str`/`int` types
    and the parsed namespace are restored here.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1000 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
    return args
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Build a tokenization closure over the given tokenizer.

    Args:
        _UpperCamelCase: a callable tokenizer applied to a batch's "text" field.

    Returns:
        A function mapping a batch dict (with a "text" key) to tokenizer output.

    NOTE: the original closure read the unbound names `tokenizer` and
    `examples`; they are bound here so the closure actually works.
    """
    tokenizer = _UpperCamelCase

    def fn(examples ):
        # Tokenize the raw "text" column of one mapped batch.
        return tokenizer(examples["text"] )

    return fn
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Serialize tokenized samples into `tf.train.Example` byte strings.

    Args:
        _UpperCamelCase: mapping with parallel "input_ids" and
            "attention_mask" lists of per-sample integer sequences.

    Returns:
        list[bytes]: one serialized `tf.train.Example` per sample.

    NOTE: the original referenced `tf.train.IntaaList` / `intaa_list`, which
    do not exist in the TensorFlow API; the canonical `Int64List` /
    `int64_list` names are restored, and locals are bound to the names the
    code actually reads.
    """
    tokenized_data = _UpperCamelCase
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Convert a HF dataset split into fixed-length TFRecord shards.

    NOTE(review): as written, every local is bound to `lowercase_` while later
    statements read names such as `args`, `dataset`, `tokenizer`, `split_dir`,
    `dataset_tokenized`, `grouped_dataset`, `dataset_snapshot`, `shard_count`
    and `total_records`, none of which are defined here — compare with the
    original (un-obfuscated) script before relying on this code.
    """
    lowercase_ : int = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        # Optionally truncate the dataset for debugging runs.
        lowercase_ : Dict = min(len(_UpperCamelCase ) , args.limit )
        lowercase_ : str = dataset.select(range(_UpperCamelCase ) )
        print(F"""Limiting the dataset to {args.limit} entries.""" )
    lowercase_ : str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        lowercase_ : Optional[Any] = os.path.join(args.output_dir , args.split )
        if not os.path.exists(_UpperCamelCase ):
            os.makedirs(_UpperCamelCase )
    else:
        # GCS paths are created implicitly by the TFRecord writer.
        lowercase_ : int = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    lowercase_ : Dict = tokenize_function(_UpperCamelCase )
    lowercase_ : Optional[Any] = dataset.map(_UpperCamelCase , batched=_UpperCamelCase , num_proc=4 , remove_columns=["text"] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(_UpperCamelCase ):
        # Concatenate all texts.
        lowercase_ : Dict = {k: sum(examples[k] , [] ) for k in examples.keys()}
        lowercase_ : Optional[Any] = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        lowercase_ : Dict = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        lowercase_ : Union[str, Any] = {
            k: [t[i : i + args.max_length] for i in range(0 , _UpperCamelCase , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    lowercase_ : Tuple = dataset_tokenized.map(_UpperCamelCase , batched=_UpperCamelCase , batch_size=1000 , num_proc=4 )
    lowercase_ : List[Any] = 0
    lowercase_ : Tuple = 0
    # Write the grouped samples out shard by shard.
    for shard in range(0 , len(_UpperCamelCase ) , args.shard_size ):
        lowercase_ : Union[str, Any] = grouped_dataset[shard : shard + args.shard_size]
        lowercase_ : Union[str, Any] = len(dataset_snapshot["input_ids"] )
        lowercase_ : str = os.path.join(_UpperCamelCase , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        lowercase_ : str = get_serialized_examples(_UpperCamelCase )
        with tf.io.TFRecordWriter(_UpperCamelCase ) as out_file:
            for i in range(len(_UpperCamelCase ) ):
                lowercase_ : Optional[int] = serialized_examples[i]
                out_file.write(_UpperCamelCase )
            print("Wrote file {} containing {} records".format(_UpperCamelCase , _UpperCamelCase ) )
        shard_count += 1
        total_records += records_containing
    # Persist the overall record count for this split.
    with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
        print(F"""Total {args.split} records: {total_records}""" , file=_UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): `parse_args` and `main` are not defined in this module as
    # written (the functions above are all named `__SCREAMING_SNAKE_CASE`),
    # and `main(args)` reads `args` while the parse result is bound to
    # `UpperCamelCase__` — confirm the intended entry points against the
    # original script.
    UpperCamelCase__ = parse_args()
    main(args)
| 700
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger for the scheduler utilities below.
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
    """Enumeration of the learning-rate schedule types supported below.

    NOTE(review): the original assigned every value to the single name
    `__lowerCamelCase` (name-mangled to a private attribute), so no usable
    enum members were created; the canonical member names referenced by the
    scheduler mapping (`SchedulerType.LINEAR`, ...) are restored here.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def __SCREAMING_SNAKE_CASE ( optimizer , last_epoch = -1 ):
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` whose multiplier is always 1.

    NOTE: the original declared both parameters as `_UpperCamelCase`
    (a SyntaxError); distinct names are restored.
    """
    return LambdaLR(optimizer , lambda current_step : 1 , last_epoch=last_epoch )
def __SCREAMING_SNAKE_CASE ( optimizer , num_warmup_steps , last_epoch = -1 ):
    """Constant schedule preceded by a linear warmup.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: steps over which the lr ramps linearly from 0 to 1x.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `LambdaLR` ramping 0 -> 1 during warmup, then constant at 1.

    NOTE: the original declared all three parameters as `_UpperCamelCase`
    (a SyntaxError); distinct names are restored.
    """
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            # Linear ramp; max(1.0, ...) guards against division by zero.
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def __SCREAMING_SNAKE_CASE ( optimizer , step_rules , last_epoch = -1 ):
    """Piecewise-constant schedule driven by a rule string.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        step_rules: string like "1:10,20:0.1,0.01" meaning multiplier 10
            until step 1, 0.1 until step 20, then 0.01 afterwards.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `LambdaLR` applying the step-dependent multiplier.

    NOTE: the original declared duplicate `_UpperCamelCase` parameters
    (a SyntaxError) and read unbound locals; the intended names are restored.
    """
    rules_dict = {}
    rule_list = step_rules.split("," )
    # All entries but the last are "step:multiplier" pairs.
    for rule_str in rule_list[:-1]:
        value_str , value = rule_str.split(":" )
        steps = int(value_str )
        rules_dict[steps] = float(value )
    # The trailing entry is the multiplier used after the last boundary.
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def __SCREAMING_SNAKE_CASE ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """Linear warmup followed by linear decay to zero.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: steps over which the lr ramps from 0 to 1x.
        num_training_steps: total steps; the lr decays to 0 at this point.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `LambdaLR` with the warmup/decay multiplier.

    NOTE: the original declared four parameters all named `_UpperCamelCase`
    (a SyntaxError); distinct names are restored.
    """
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Clamp at 0 so the lr never goes negative past num_training_steps.
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def __SCREAMING_SNAKE_CASE ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    """Linear warmup followed by cosine decay.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: steps over which the lr ramps from 0 to 1x.
        num_training_steps: total steps of the cosine phase.
        num_cycles: number of cosine waves (0.5 = half-cosine down to 0).
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `LambdaLR` with the warmup/cosine multiplier.

    NOTE: the original declared duplicate `_UpperCamelCase` parameters
    (a SyntaxError); distinct names are restored.
    """
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def __SCREAMING_SNAKE_CASE ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    """Linear warmup followed by cosine decay with hard restarts.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: steps over which the lr ramps from 0 to 1x.
        num_training_steps: total steps of the cosine phase.
        num_cycles: number of hard restarts within the cosine phase.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `LambdaLR` with the warmup/restarting-cosine multiplier.

    NOTE: the original declared duplicate `_UpperCamelCase` parameters
    (a SyntaxError); distinct names are restored.
    """
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        # The modulo produces a fresh cosine cycle (a "hard restart") per cycle.
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def __SCREAMING_SNAKE_CASE ( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    """Linear warmup followed by polynomial decay down to `lr_end`.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer` (its default lr is
            used as the initial lr of the decay).
        num_warmup_steps: steps over which the lr ramps from 0 to 1x.
        num_training_steps: total steps; the lr reaches `lr_end` here.
        lr_end: final absolute learning rate.
        power: polynomial exponent (1.0 = linear decay).
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `LambdaLR` with the warmup/polynomial-decay multiplier.

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's lr.

    NOTE: the original declared duplicate `_UpperCamelCase` parameters
    (a SyntaxError); distinct names are restored.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
# Maps each SchedulerType member to the factory that builds its LambdaLR.
# NOTE(review): neither `SchedulerType` nor the `get_*` factory names
# referenced here are defined in this module as written (the enum above is
# `_UpperCAmelCase` and the factories are all `__SCREAMING_SNAKE_CASE`) —
# confirm against the original source.
UpperCamelCase__ = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    """Unified factory: build the named schedule with the given arguments.

    Args:
        name: a `SchedulerType` member or its string value.
        optimizer: the wrapped `torch.optim.Optimizer`.
        step_rules: rule string, used only by PIECEWISE_CONSTANT.
        num_warmup_steps: required by every warmup-based schedule.
        num_training_steps: required by the decaying schedules.
        num_cycles: used by COSINE_WITH_RESTARTS.
        power: used by POLYNOMIAL.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: when a schedule's required argument is missing.

    NOTE: the original declared eight parameters all named `_UpperCamelCase`
    (a SyntaxError); the canonical names are restored.
    """
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
| 640
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic kernels so image outputs are reproducible across runs.
enable_full_determinism()
class _UpperCAmelCase ( snake_case , snake_case , unittest.TestCase ):
    '''Fast (CPU-sized) tests for `StableDiffusionSAGPipeline`.

    NOTE(review): all methods share the name `lowerCAmelCase__`, locals are
    bound to `lowercase_` while later statements read the original names
    (`unet`, `scheduler`, ...), and several bodies read `a` where no such
    parameter exists — compare with the original test file.
    '''
    # Pipeline class and test-parameter sets exercised by the mixins.
    __lowerCamelCase: List[Any] = StableDiffusionSAGPipeline
    __lowerCamelCase: str = TEXT_TO_IMAGE_PARAMS
    __lowerCamelCase: Dict = TEXT_TO_IMAGE_BATCH_PARAMS
    __lowerCamelCase: str = TEXT_TO_IMAGE_IMAGE_PARAMS
    __lowerCamelCase: Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    __lowerCamelCase: Union[str, Any] = False
    def lowerCAmelCase__ ( self : List[str] ):
        '''Build the tiny seeded components used to assemble the pipeline.'''
        torch.manual_seed(0 )
        lowercase_ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
        lowercase_ : int = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a , set_alpha_to_one=a , )
        torch.manual_seed(0 )
        lowercase_ : str = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowercase_ : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        lowercase_ : Union[str, Any] = CLIPTextModel(a )
        lowercase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowercase_ : str = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def lowerCAmelCase__ ( self : Tuple , a : Optional[int] , a : Any=0 ):
        '''Build deterministic call kwargs for a given device/seed.'''
        if str(a ).startswith("mps" ):
            # MPS does not support device-bound generators.
            lowercase_ : int = torch.manual_seed(a )
        else:
            lowercase_ : str = torch.Generator(device=a ).manual_seed(a )
        lowercase_ : Dict = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def lowerCAmelCase__ ( self : Any ):
        '''Batch-of-one must match single inference within 3e-3.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    '''GPU integration tests for SAG against released checkpoints.

    NOTE(review): locals are bound to `lowercase_` while later statements
    read `sag_pipe`, `prompt`, `output`, `image`, `image_slice`,
    `expected_slice`, and the methods read `a` where no such parameter
    exists — compare with the original test file.
    '''
    def lowerCAmelCase__ ( self : str ):
        '''Release GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase__ ( self : Tuple ):
        '''SD 1.4: image slice must match the recorded reference values.'''
        lowercase_ : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        lowercase_ : Union[str, Any] = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )
        lowercase_ : Optional[int] = "."
        lowercase_ : Union[str, Any] = torch.manual_seed(0 )
        lowercase_ : Optional[Any] = sag_pipe(
            [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" )
        lowercase_ : str = output.images
        lowercase_ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        lowercase_ : List[str] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def lowerCAmelCase__ ( self : Tuple ):
        '''SD 2.1-base: image slice must match the recorded reference values.'''
        lowercase_ : Tuple = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        lowercase_ : Any = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )
        lowercase_ : Union[str, Any] = "."
        lowercase_ : Tuple = torch.manual_seed(0 )
        lowercase_ : int = sag_pipe(
            [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" )
        lowercase_ : int = output.images
        lowercase_ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        lowercase_ : Tuple = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Non-square output: only the resulting shape is checked.'''
        lowercase_ : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        lowercase_ : str = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )
        lowercase_ : str = "."
        lowercase_ : Union[str, Any] = torch.manual_seed(0 )
        lowercase_ : List[Any] = sag_pipe(
            [prompt] , width=7_6_8 , height=5_1_2 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" , )
        lowercase_ : List[str] = output.images
        assert image.shape == (1, 5_1_2, 7_6_8, 3)
| 701
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 200 ):
"""simple docstring"""
lowercase_ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200]
lowercase_ : str = [0] * (pence + 1)
lowercase_ : Dict = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 640
| 0
|
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( __snake_case ):
__lowerCamelCase: Tuple = ['image_processor']
__lowerCamelCase: List[Any] = 'SamImageProcessor'
    def __init__( self : Tuple , a : str ):
        '''Wrap a `SamImageProcessor` and cache its padding/size settings.

        NOTE(review): the body reads `__UpperCamelCase`, which is undefined
        (the parameter is `a`), and all attributes are bound to `lowercase_`
        while other methods read `self.image_processor`, `self.point_pad_value`
        (-10) and `self.target_size` — compare with the original source.
        '''
        super().__init__(__UpperCamelCase )
        lowercase_ : Optional[int] = self.image_processor
        lowercase_ : Tuple = -1_0
        lowercase_ : Optional[int] = self.image_processor.size["longest_edge"]
    def __call__( self : Dict , a : List[str]=None , a : Tuple=None , a : int=None , a : Optional[Any]=None , a : int = None , **a : List[str] , ):
        '''Preprocess images plus optional points/labels/boxes prompts.

        NOTE(review): every parameter is declared as `a` (duplicate argument
        names are a SyntaxError) and the body reads the undefined name
        `__UpperCamelCase` — compare with the original source, where the
        parameters are images, segmentation maps, points, labels and boxes.
        '''
        lowercase_ : List[str] = self.image_processor(
            __UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
        # pop arguments that are not used in the forward but used nevertheless
        lowercase_ : Tuple = encoding_image_processor["original_sizes"]
        if hasattr(__UpperCamelCase , "numpy" ):  # Checks if Torch or TF tensor
            lowercase_ : Optional[int] = original_sizes.numpy()
        # Validate/normalize the raw prompt inputs, then rescale them to the
        # resized image coordinate space and attach them to the encoding.
        lowercase_ , lowercase_ , lowercase_ : int = self._check_and_preprocess_points(
            input_points=__UpperCamelCase , input_labels=__UpperCamelCase , input_boxes=__UpperCamelCase , )
        lowercase_ : int = self._normalize_and_convert(
            __UpperCamelCase , __UpperCamelCase , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , input_boxes=__UpperCamelCase , return_tensors=__UpperCamelCase , )
        return encoding_image_processor
    def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str] , a : Tuple , a : Optional[int]=None , a : List[str]=None , a : Dict=None , a : List[str]="pt" , ):
        '''Rescale prompt points/labels/boxes and convert them to pt/tf tensors.

        NOTE(review): duplicate `a` parameters (a SyntaxError) and pervasive
        reads of the undefined name `__UpperCamelCase` — compare with the
        original source before relying on this code.
        '''
        if input_points is not None:
            # When there are fewer original sizes than point groups, reuse the
            # first size for every group; otherwise pair them one-to-one.
            if len(__UpperCamelCase ) != len(__UpperCamelCase ):
                lowercase_ : Optional[int] = [
                    self._normalize_coordinates(self.target_size , __UpperCamelCase , original_sizes[0] ) for point in input_points
                ]
            else:
                lowercase_ : int = [
                    self._normalize_coordinates(self.target_size , __UpperCamelCase , __UpperCamelCase )
                    for point, original_size in zip(__UpperCamelCase , __UpperCamelCase )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    # Ragged point groups are padded (with the pad value) so
                    # they can be stacked into one array.
                    lowercase_ , lowercase_ : Optional[Any] = self._pad_points_and_labels(__UpperCamelCase , __UpperCamelCase )
            lowercase_ : Dict = np.array(__UpperCamelCase )
        if input_labels is not None:
            lowercase_ : int = np.array(__UpperCamelCase )
        if input_boxes is not None:
            # Boxes get the same size pairing logic, in bounding-box mode.
            if len(__UpperCamelCase ) != len(__UpperCamelCase ):
                lowercase_ : Union[str, Any] = [
                    self._normalize_coordinates(self.target_size , __UpperCamelCase , original_sizes[0] , is_bounding_box=__UpperCamelCase )
                    for box in input_boxes
                ]
            else:
                lowercase_ : int = [
                    self._normalize_coordinates(self.target_size , __UpperCamelCase , __UpperCamelCase , is_bounding_box=__UpperCamelCase )
                    for box, original_size in zip(__UpperCamelCase , __UpperCamelCase )
                ]
            lowercase_ : Dict = np.array(__UpperCamelCase )
        if input_boxes is not None:
            if return_tensors == "pt":
                lowercase_ : int = torch.from_numpy(__UpperCamelCase )
                # boxes batch size of 1 by default
                lowercase_ : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                lowercase_ : Union[str, Any] = tf.convert_to_tensor(__UpperCamelCase )
                # boxes batch size of 1 by default
                lowercase_ : Optional[Any] = tf.expand_dims(__UpperCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                lowercase_ : str = torch.from_numpy(__UpperCamelCase )
                # point batch size of 1 by default
                lowercase_ : List[str] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                lowercase_ : List[Any] = tf.convert_to_tensor(__UpperCamelCase )
                # point batch size of 1 by default
                lowercase_ : Union[str, Any] = tf.expand_dims(__UpperCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                lowercase_ : List[str] = torch.from_numpy(__UpperCamelCase )
                # point batch size of 1 by default
                lowercase_ : Tuple = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                lowercase_ : str = tf.convert_to_tensor(__UpperCamelCase )
                # point batch size of 1 by default
                lowercase_ : Dict = tf.expand_dims(__UpperCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor
def lowerCAmelCase__ ( self : Optional[int] , a : Optional[int] , a : Optional[int] ):
    """Pad every per-image point prompt (and its labels) up to the longest one.

    NOTE(review): the signature repeats the parameter name ``a`` (a SyntaxError) and
    the body reads ``input_points``/``input_labels``/``__UpperCamelCase`` that are
    never bound here -- the original parameter names were apparently mangled and must
    be restored before this method can run.
    """
    # Longest point list in the batch; shorter entries are padded up to this length.
    lowercase_ : List[Any] = max([point.shape[0] for point in input_points] )
    lowercase_ : List[Any] = []
    for i, point in enumerate(__UpperCamelCase ):
        if point.shape[0] != expected_nb_points:
            # Pad missing rows with (pad_value, pad_value) coordinates ...
            lowercase_ : Any = np.concatenate(
                [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
            # ... and mark the corresponding label slots with the pad value too.
            lowercase_ : Optional[int] = np.append(input_labels[i] , [self.point_pad_value] )
        processed_input_points.append(__UpperCamelCase )
    lowercase_ : Union[str, Any] = processed_input_points
    return input_points, input_labels
def lowerCAmelCase__ ( self : Optional[int] , a : Optional[int] , a : Tuple , a : Optional[int] , a : str=False ):
    """Rescale (x, y) coordinates from the original image size to the preprocessed size.

    NOTE(review): the signature repeats the parameter name ``a`` (a SyntaxError) and
    the body reads ``original_size``/``is_bounding_box``/``__UpperCamelCase`` that are
    never bound -- mangled parameter names need restoring.
    """
    lowercase_ , lowercase_ : int = original_size
    # Size the image processor would resize (old_h, old_w) to, given the longest edge.
    lowercase_ , lowercase_ : Optional[int] = self.image_processor._get_preprocess_shape(__UpperCamelCase , longest_edge=__UpperCamelCase )
    lowercase_ : Any = deepcopy(__UpperCamelCase ).astype(__UpperCamelCase )
    if is_bounding_box:
        # Boxes arrive flat as (x1, y1, x2, y2); view each as two corner points.
        lowercase_ : Any = coords.reshape(-1 , 2 , 2 )
    # Scale x by the width ratio and y by the height ratio.
    lowercase_ : int = coords[..., 0] * (new_w / old_w)
    lowercase_ : str = coords[..., 1] * (new_h / old_h)
    if is_bounding_box:
        # Restore the flat 4-value box layout.
        lowercase_ : Optional[int] = coords.reshape(-1 , 4 )
    return coords
def lowerCAmelCase__ ( self : Optional[Any] , a : str=None , a : List[str]=None , a : Optional[int]=None , ):
    """Validate point/label/box prompts and convert each to a list of numpy arrays.

    NOTE(review): the signature repeats the parameter name ``a`` (a SyntaxError) and the
    body reads ``input_points``/``input_labels``/``input_boxes``/``__UpperCamelCase``
    that are never bound -- mangled parameter names need restoring.
    """
    if input_points is not None:
        if hasattr(__UpperCamelCase , "numpy" ):  # Checks for TF or Torch tensor
            lowercase_ : Optional[int] = input_points.numpy().tolist()
        if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not isinstance(input_points[0] , __UpperCamelCase ):
            raise ValueError("Input points must be a list of list of floating points." )
        lowercase_ : Optional[int] = [np.array(__UpperCamelCase ) for input_point in input_points]
    else:
        lowercase_ : Optional[int] = None
    if input_labels is not None:
        if hasattr(__UpperCamelCase , "numpy" ):
            lowercase_ : Dict = input_labels.numpy().tolist()
        if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not isinstance(input_labels[0] , __UpperCamelCase ):
            raise ValueError("Input labels must be a list of list integers." )
        lowercase_ : Dict = [np.array(__UpperCamelCase ) for label in input_labels]
    else:
        lowercase_ : Any = None
    if input_boxes is not None:
        if hasattr(__UpperCamelCase , "numpy" ):
            lowercase_ : Any = input_boxes.numpy().tolist()
        # Boxes must be nested one level deeper than points: list of list of 4-lists.
        if (
            not isinstance(__UpperCamelCase , __UpperCamelCase )
            or not isinstance(input_boxes[0] , __UpperCamelCase )
            or not isinstance(input_boxes[0][0] , __UpperCamelCase )
        ):
            raise ValueError("Input boxes must be a list of list of list of floating points." )
        lowercase_ : int = [np.array(__UpperCamelCase ).astype(np.floataa ) for box in input_boxes]
    else:
        lowercase_ : int = None
    return input_points, input_labels, input_boxes
@property
def lowerCAmelCase__ ( self : int ):
    """Model input names from the image processor, deduplicated while preserving order."""
    lowercase_ : List[Any] = self.image_processor.model_input_names
    # dict.fromkeys keeps first-seen order while dropping duplicates.
    # NOTE(review): ``__UpperCamelCase`` is unbound here -- presumably the variable
    # assigned above; restore the mangled name.
    return list(dict.fromkeys(__UpperCamelCase ) )
def lowerCAmelCase__ ( self : Dict , *a : Tuple , **a : List[str] ):
    """Thin wrapper forwarding to the image processor's ``post_process_masks``.

    NOTE(review): ``__UpperCamelCase`` is unbound -- the forwarded arguments were
    presumably ``*a``/``**a`` before name mangling.
    """
    return self.image_processor.post_process_masks(*__UpperCamelCase , **__UpperCamelCase )
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
    """Reader that builds a (streaming or map-style) dataset from JSON files.

    NOTE(review): ``__init__`` repeats the parameter name ``a`` (a SyntaxError) and the
    body reads ``field``/``path_or_paths`` that are never bound -- mangled parameter
    names need restoring before this class can run.
    """

    def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
        """Store the read options and construct the underlying ``Json`` builder."""
        super().__init__(
            a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
        lowercase_ : str = field
        # Normalize a bare path (or list of paths) into a {split: paths} mapping.
        lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
        lowercase_ : Any = Json(
            cache_dir=a , data_files=a , features=a , field=a , **a , )

    def lowerCAmelCase__ ( self : Optional[int] ):
        """Materialize the dataset: streaming view, or download/prepare then load."""
        if self.streaming:
            lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            lowercase_ : Dict = None
            lowercase_ : Optional[int] = None
            lowercase_ : str = None
            lowercase_ : str = None
            self.builder.download_and_prepare(
                download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
            lowercase_ : int = self.builder.as_dataset(
                split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
        return dataset
class _UpperCAmelCase :
    """Writer that serializes a ``Dataset`` to JSON (lines or other pandas orients).

    NOTE(review): ``__init__`` repeats the parameter name ``a`` (a SyntaxError) and
    every assignment target is the mangled ``lowercase_`` -- the original attribute
    names (``dataset``, ``path_or_buf``, ``batch_size``, ...) are read later but
    never actually set here; restore them before use.
    """

    def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
        """Record the target, batch size, worker count and pandas ``to_json`` kwargs."""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        lowercase_ : Dict = dataset
        lowercase_ : Optional[int] = path_or_buf
        lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        lowercase_ : Optional[Any] = num_proc
        lowercase_ : List[Any] = "utf-8"
        lowercase_ : List[str] = to_json_kwargs
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Write the dataset to the configured path/buffer; return bytes written."""
        lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
        lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
        # JSON-Lines is the default only for the "records" orient.
        lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
        lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
        lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            # Path-like destination: let fsspec handle (optionally compressed) opening.
            with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
                lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
        else:
            # Already-open buffer: compression cannot be applied here.
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead." )
            lowercase_ : Dict = self._write(
                file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
        return written
    def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
        """Serialize one batch slice of the dataset to encoded JSON bytes."""
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
        lowercase_ : Optional[int] = query_table(
            table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
        lowercase_ : Dict = batch.to_pandas().to_json(
            path_or_buf=a , orient=a , lines=a , index=a , **a )
        # Ensure each batch ends with a newline so concatenated batches stay valid JSON-Lines.
        if not json_str.endswith("\n" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
        """Stream every batch to ``file_obj``, optionally via a multiprocessing pool."""
        lowercase_ : Union[str, Any] = 0
        if self.num_proc is None or self.num_proc == 1:
            # Single-process path: serialize batches sequentially.
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(a )
        else:
            # Multi-process path: imap preserves batch order while parallelizing serialization.
            lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                    written += file_obj.write(a )
        return written
| 640
| 0
|
'''simple docstring'''
import os
from pathlib import Path
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Generate and write the FSMT WMT19 model-card README for one language pair.

    NOTE(review): the signature repeats the parameter name ``_UpperCamelCase`` (a
    SyntaxError) and the body reads ``src_lang``/``tgt_lang``/``lowerCAmelCase_``
    that are never bound -- the original parameters (model_card_dir, src_lang,
    tgt_lang) were mangled and must be restored.
    """
    lowercase_ : Dict = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, oder?''',
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    lowercase_ : Dict = {
        '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
        '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
        '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
        '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
    }
    lowercase_ : List[Any] = F"""{src_lang}-{tgt_lang}"""
    # Model-card template; the placeholders are interpolated from the values above.
    lowercase_ : Optional[int] = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
    lowercase_ : int = os.path.join(lowerCAmelCase_ , "README.md" )
    print(F"""Generating {path}""" )
    with open(lowerCAmelCase_ , "w" , encoding="utf-8" ) as f:
        f.write(lowerCAmelCase_ )
# make sure we are under the root of the project
# NOTE(review): all four script-level names below were mangled to ``UpperCamelCase__``;
# the later reads (repo_dir, model_cards_dir, src_lang/tgt_lang, model_card_dir)
# show the intended names -- restore them before running.
UpperCamelCase__ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase__ = repo_dir / 'model_cards'
# Emit one README per supported WMT19 language pair.
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    UpperCamelCase__ = model_name.split('-')
    UpperCamelCase__ = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 703
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Plain top-down recursion without memoization (exponential time); kept as the
    reference implementation for the memoized/bottom-up variants below.

    Args:
        rows: number of rows in ``mat``.
        cols: number of columns in ``mat``.
        mat: 2-D matrix of 0/1 values.

    Returns:
        Side length of the largest square made entirely of 1s (0 if none).

    >>> largest_square_area_in_matrix_top_down(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: walked off the bottom/right edge of the matrix.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # A square anchored here extends the smallest of its three neighbours.
            sub_problem_sol = 1 + min(right, diagonal, down)
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    # One-element list acts as a mutable cell for the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Top-down recursion memoized in ``dp_array`` (-1 marks "not computed"), giving
    O(rows * cols) time. Unlike the plain recursive variant, every result --
    including the zero case -- is cached so no subproblem is recomputed.

    Args:
        rows: number of rows in ``mat``.
        cols: number of columns in ``mat``.
        mat: 2-D matrix of 0/1 values.

    Returns:
        Side length of the largest square made entirely of 1s (0 if none).

    >>> largest_square_area_in_matrix_top_down_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min(right, diagonal, down)
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
        else:
            sub_problem_sol = 0
        # Cache both the positive and the zero result (the original skipped zeros).
        dp_array[row][col] = sub_problem_sol
        return sub_problem_sol

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Bottom-up DP over a (rows+1) x (cols+1) table: dp[r][c] is the side of the
    largest square whose top-left corner is (r, c). O(rows * cols) time and space.
    This is the variant called from the ``__main__`` guard below.

    Args:
        rows: number of rows in ``mat``.
        cols: number of columns in ``mat``.
        mat: 2-D matrix of 0/1 values.

    Returns:
        Side length of the largest square made entirely of 1s (0 if none).

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    # Extra sentinel row/column of zeros avoids bounds checks at the edges.
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Space-optimized bottom-up DP keeping only the current and the next row:
    O(rows * cols) time, O(cols) space.

    Bug fix versus the common reference implementation: the row hand-off must COPY
    the buffer (``current_row[:]``). Aliasing the two lists makes the diagonal
    read pick up the value just written for the current row (columns are processed
    right-to-left), e.g. [[1, 1], [1, 0]] would wrongly report 2 instead of 1.

    Args:
        rows: number of rows in ``mat``.
        cols: number of columns in ``mat``.
        mat: 2-D matrix of 0/1 values.

    Returns:
        Side length of the largest square made entirely of 1s (0 if none).

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy, do NOT alias: next_row must keep this row's values intact while
        # current_row is overwritten during the next iteration.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 640
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): all three module-level names below were mangled to ``UpperCamelCase__``;
# later code reads ``logger``, ``MAPPING`` and ``TOP_LEVEL_KEYS`` -- restore those names.
UpperCamelCase__ = logging.get_logger(__name__)
# Maps fairseq parameter-name fragments to their HF UniSpeech counterparts;
# a "*" in the value is replaced by the transformer layer index at load time.
UpperCamelCase__ = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# Keys that live at the top level of the HF model (no "unispeech." prefix is added).
UpperCamelCase__ = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Walk ``key`` (dot-separated attribute path) into ``hf_pointer`` and copy ``value`` there.

    Restores the mangled original: the def was renamed to a shadowed placeholder
    (breaking the call in ``recursively_load_weights``), the signature repeated one
    parameter name (a SyntaxError), and every assignment target was lost.

    Args:
        hf_pointer: HF model (or sub-module) to write into.
        key: dot-separated attribute path inside ``hf_pointer``.
        value: tensor from the fairseq state dict.
        full_name: original fairseq parameter name (for logging/assert messages).
        weight_type: one of "weight"/"weight_g"/"weight_v"/"bias" or None.
        is_finetuned: whether the checkpoint is a fine-tuned (CTC) model.
    """
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    # Sanity-check the shape before overwriting the HF parameter.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of ``fairseq_model``'s state dict into ``hf_model``.

    Restores the mangled original (shadowed def name broke the call in
    ``convert_unispeech_checkpoint``; assignment targets were lost).

    Conv feature-extractor weights go through ``load_conv_layer``; everything else
    is matched against ``MAPPING`` and written via ``set_recursively``. Unmatched
    parameters are collected and reported in a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Top-level keys are written as-is; others live under the "unispeech." prefix.
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the transformer layer index into the HF key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    Restores the mangled original (shadowed def name broke the call in
    ``recursively_load_weights``; assignment targets were lost).

    fairseq names look like ``conv_layers.<layer_id>.<type_id>.{weight,bias}``:
    type_id 0 is the conv itself, type_id 2 is the layer norm (only present for
    layer-norm extractors, or on layer 0 for group-norm ones). Anything else is
    recorded in ``unused_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint to the HF Transformers format.

    Restores the mangled original (shadowed def name broke the call in the
    ``__main__`` guard; parameter names and assignment targets were lost).

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the HF model (and processor).
        config_path: optional path to an existing HF ``config.json``.
        dict_path: fairseq dictionary JSON (only used for fine-tuned models).
        is_finetuned: fine-tuned (CTC) checkpoint vs. pretraining checkpoint.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extractors need attention masks; group-norm ones don't.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
UpperCamelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 704
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# Lightweight stand-in for the CLI argparse namespace consumed by TestCommand.
# The trailing ``defaults`` cover the last eight of the nine fields, so only
# ``dataset`` is required when constructing it.
# NOTE(review): later code reads ``_TestCommandArgs``; this module-level name was
# mangled to ``UpperCamelCase__`` and should be restored.
UpperCamelCase__ = namedtuple(
    '_TestCommandArgs',
    [
        'dataset',
        'name',
        'cache_dir',
        'data_dir',
        'all_configs',
        'save_infos',
        'ignore_verifications',
        'force_redownload',
        'clear_cache',
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    """Return True when ``source`` is within 1% of ``target`` (relative difference).

    Restores the mangled original: the def had been renamed (breaking the
    ``is_apercent_close`` calls in the integration test below) and the signature
    repeated one parameter name, which is a SyntaxError.

    Args:
        source: measured value.
        target: reference value (must be non-zero; used as the denominator).
    """
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Run `datasets-cli test --save_infos` on a dataset and check the produced README infos.

    NOTE(review): the body passes the single mangled parameter ``_UpperCamelCase``
    everywhere (dataset path, cache dir, getattr key) and reads ``test_command``/
    ``dataset_infos``/``expected_dataset_infos``/``result``/``expected`` that are
    never bound -- the original local names were mangled and must be restored.
    """
    lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
    lowercase_ : int = TestCommand(*_UpperCamelCase )
    test_command.run()
    # The command is expected to have written dataset infos into README.md.
    lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
    assert os.path.exists(_UpperCamelCase )
    lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
    # Reference infos for the fixture dataset (XTREME PAN-X-like schema).
    lowercase_ : Optional[int] = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string" ) ),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
                        "langs": Sequence(Value("string" ) ),
                        "spans": Sequence(Value("string" ) ),
                    } ) , splits=[
                    {
                        "name": "train",
                        "num_bytes": 235_1563,
                        "num_examples": 1_0000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 23_8418,
                        "num_examples": 1000,
                    },
                ] , download_size=394_0680 , dataset_size=258_9981 , )
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
        if key == "num_bytes":
            # Byte counts are only required to be within 1% of the reference.
            assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
        elif key == "splits":
            assert list(_UpperCamelCase ) == list(_UpperCamelCase )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            # NOTE(review): this bare comparison is a no-op -- it was presumably
            # meant to be ``assert result == expected``.
            result == expected
| 640
| 0
|
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    """Project Euler 87: count numbers below ``limit`` expressible as p^2 + q^3 + r^4
    with p, q, r prime.

    Restores the mangled original, whose body read ``limit``/``ret``/``a_`` that the
    signature never bound. Also iterates the primes in ascending order so the early
    ``break`` statements (which assume monotonically growing cubes/fourth powers)
    are guaranteed correct rather than relying on set iteration order.

    >>> solution(50)
    4
    """
    ret = set()
    # Largest prime whose square can still fit: p^2 <= limit - 2^3 - 2^4 = limit - 24.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    sorted_primes = sorted(primes)
    for prime_sq in sorted_primes:
        square = prime_sq * prime_sq
        for prime_cb in sorted_primes:
            cube = prime_cb * prime_cb * prime_cb
            # Smallest remaining term is 2^4 = 16, so anything >= limit - 16 is hopeless.
            if square + cube >= limit - 16:
                break
            for prime_tet in sorted_primes:
                tetr = prime_tet * prime_tet * prime_tet * prime_tet
                total = square + cube + tetr
                if total >= limit:
                    break
                # A set deduplicates sums reachable via different prime triples.
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 705
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# Modalities a tool may declare for its inputs/outputs.
# NOTE(review): later code reads ``authorized_types``; this module-level name was
# mangled to ``UpperCamelCase__`` and should be restored.
UpperCamelCase__ = ['text', 'image', 'audio']
def create_inputs(input_types):
    """Build one dummy input per requested modality.

    Restores the mangled original: the def had been renamed, breaking both its own
    recursive call and the calls in the tool-tester mixin below.

    Args:
        input_types: iterable of modality names ("text", "image", "audio") or nested
            lists thereof (a nested list produces a list of dummy inputs).

    Returns:
        List of dummy inputs (str, PIL image, torch tensor, or nested list).

    Raises:
        ValueError: for any unrecognized modality.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            # Nested spec: recurse so one tool input can itself be a list of modalities.
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F"""Invalid type requested: {input_type}""")
    return inputs
def output_types(outputs):
    """Classify each produced output as "text", "image" or "audio".

    Restores the mangled original: the def had been renamed, breaking the call in
    the tool-tester mixin below. The local accumulator is named distinctly so it
    does not shadow the function itself.

    Args:
        outputs: iterable of tool outputs (str/AgentText, PIL image/AgentImage,
            torch tensor/AgentAudio).

    Returns:
        List of modality names, one per output.

    Raises:
        ValueError: for any output of an unrecognized type.
    """
    detected_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            detected_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            detected_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            detected_types.append("audio")
        else:
            raise ValueError(F"""Invalid output: {output}""")
    return detected_types
@is_tool_test
class _UpperCAmelCase :
    """Mixin of common checks for Transformers tools; expects ``self.tool`` to be set.

    NOTE(review): throughout this class the local names were mangled to
    ``lowercase_`` and call arguments to ``a`` -- the reads (``inputs``,
    ``outputs``, ``_inputs``) show the intended names; restore them before running.
    """

    def lowerCAmelCase__ ( self : List[Any] ):
        """Every declared input/output modality must be one of the authorized types."""
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )
        lowercase_ : Optional[Any] = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , a ):
                # A single tool input may itself be a list of modalities.
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        lowercase_ : Any = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def lowerCAmelCase__ ( self : Any ):
        """Calling the tool on dummy inputs must yield outputs of the declared types."""
        lowercase_ : List[str] = create_inputs(self.tool.inputs )
        lowercase_ : List[str] = self.tool(*a )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            lowercase_ : Union[str, Any] = [outputs]
        self.assertListEqual(output_types(a ) , self.tool.outputs )

    def lowerCAmelCase__ ( self : List[str] ):
        """Tools must carry a description and a default checkpoint."""
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )

    def lowerCAmelCase__ ( self : Any ):
        """Each output must be an instance of the agent type mapped to its modality."""
        lowercase_ : Any = create_inputs(self.tool.inputs )
        lowercase_ : str = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
        for output, output_type in zip(a , self.tool.outputs ):
            lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(a , a ) )

    def lowerCAmelCase__ ( self : Tuple ):
        """The tool must also accept agent-typed (wrapped) inputs."""
        lowercase_ : Dict = create_inputs(self.tool.inputs )
        lowercase_ : Optional[int] = []
        for _input, input_type in zip(a , self.tool.inputs ):
            if isinstance(a , a ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        lowercase_ : Any = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : Any = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class _UpperCAmelCase ( snake_case ):
    """Configuration class for CodeGen models.

    NOTE(review): originally both class attributes were named
    ``__lowerCamelCase`` (the second silently shadowed the first) and every
    ``__init__`` parameter was named ``a`` — a SyntaxError. The conventional
    ``model_type``/``attribute_map`` names and distinct parameter names,
    matching the attributes the body stores, are restored here.
    """

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_0_4_0_0,
        n_positions=2_0_4_8,
        n_ctx=2_0_4_8,
        n_embd=4_0_9_6,
        n_layer=2_8,
        n_head=1_6,
        rotary_dim=6_4,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0_2_5_6,
        eos_token_id=5_0_2_5_6,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """Store the hyper-parameters on the instance (the original assigned
        them to throwaway locals, discarding them) and forward the token ids
        to the base config."""
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class _UpperCAmelCase ( snake_case ):
    """ONNX export configuration for CodeGen, with optional past-key-values.

    NOTE(review): the original methods all shared the name ``lowerCAmelCase__``
    and used duplicate ``a`` parameters (a SyntaxError); the body itself reads
    ``self.num_layers`` / ``self.num_attention_heads``, so the conventional
    OnnxConfigWithPast override names are restored here.
    """

    def __init__( self , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        """Delegate to OnnxConfigWithPast and guarantee a pad token id exists."""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        """Dynamic-axis specification for the exported inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """Build ordered dummy inputs, adding zero past_key_values and an
        extended attention mask when ``use_past`` is enabled."""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        # Opset 13 is the minimum supporting the operators this export needs.
        return 1_3
| 706
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class _UpperCAmelCase ( snake_case ):
    """Kandinsky text-to-image decoder pipeline (text encoder, tokenizer, UNet,
    scheduler and MoVQ image decoder).

    NOTE(review): throughout this class every method declares several
    parameters all named ``a`` — duplicate argument names are a SyntaxError in
    Python — and results are bound to throwaway ``lowercase_`` locals while
    later statements read the intended names (``latents``, ``prompt_embeds``,
    ...). The block cannot run as written; the distinct parameter/local names
    need to be restored. Comments below describe the apparent intent only.
    """
    def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
        """Register the sub-models on the pipeline."""
        super().__init__()
        self.register_modules(
            text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
        # 2 ** (number of MoVQ downsampling stages); presumably meant to be
        # stored as ``self.movq_scale_factor`` (read later in __call__) —
        # assigning to a local discards it. TODO confirm.
        lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
        """Create (or validate) the initial latents and scale them by the
        scheduler's initial noise sigma."""
        if latents is None:
            lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            lowercase_ : Optional[int] = latents.to(a )
        lowercase_ : str = latents * scheduler.init_noise_sigma
        return latents
    def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
        """Encode the prompt (and, for classifier-free guidance, the negative
        prompt) into embeddings, hidden states and attention masks, each
        repeated ``num_images_per_prompt`` times."""
        lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
        # get prompt text embeddings
        lowercase_ : Any = self.tokenizer(
            a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
        lowercase_ : Union[str, Any] = text_inputs.input_ids
        lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
        # Warn (and truncate) when the prompt exceeds the tokenizer's max length.
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
            lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
        lowercase_ : List[str] = text_input_ids.to(a )
        lowercase_ : int = text_inputs.attention_mask.to(a )
        lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
            input_ids=a , attention_mask=a )
        lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
        lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
        lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : List[str]
            if negative_prompt is None:
                lowercase_ : int = [""] * batch_size
            elif type(a ) is not type(a ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
                    f""" {type(a )}.""" )
            elif isinstance(a , a ):
                lowercase_ : Tuple = [negative_prompt]
            elif batch_size != len(a ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`." )
            else:
                lowercase_ : Dict = negative_prompt
            lowercase_ : str = self.tokenizer(
                a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
            lowercase_ : List[Any] = uncond_input.input_ids.to(a )
            lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
            lowercase_ , lowercase_ : int = self.text_encoder(
                input_ids=a , attention_mask=a )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase_ : List[str] = negative_prompt_embeds.shape[1]
            lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
            lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
            lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
            lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
            lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , a , -1 )
            lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
            lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
        """Sequentially offload the sub-models to CPU via accelerate to reduce
        GPU memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        lowercase_ : str = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(a , a )
    def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
        """Offload whole sub-models to CPU with hooks (requires accelerate
        >= 0.17.0.dev0); keeps memory low with less speed penalty than
        sequential offload."""
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase_ : List[str] = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
        # NOTE(review): this pipeline never registers a ``safety_checker``
        # module — confirm whether this branch is reachable.
        if self.safety_checker is not None:
            lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
        # We'll offload the last model manually.
        lowercase_ : Dict = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__ ( self : Tuple ):
        """Return the device the UNet actually executes on (honours accelerate
        offload hooks when present)."""
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    # NOTE(review): ``a`` is undefined at class scope — the decorator argument
    # was presumably the example docstring constant; confirm before running.
    @replace_example_docstring(a )
    def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
        """Run the denoising loop: encode the prompt, combine it with the prior
        image embeddings, iteratively denoise the latents and decode them with
        MoVQ into images."""
        if isinstance(a , a ):
            lowercase_ : List[str] = 1
        elif isinstance(a , a ):
            lowercase_ : int = len(a )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
        lowercase_ : Tuple = self._execution_device
        lowercase_ : Dict = batch_size * num_images_per_prompt
        # Classifier-free guidance is active for guidance_scale > 1.
        lowercase_ : Dict = guidance_scale > 1.0
        lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
            a , a , a , a , a )
        if isinstance(a , a ):
            lowercase_ : Optional[int] = torch.cat(a , dim=0 )
        if isinstance(a , a ):
            lowercase_ : int = torch.cat(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=a )
        self.scheduler.set_timesteps(a , device=a )
        lowercase_ : List[str] = self.scheduler.timesteps
        lowercase_ : str = self.unet.config.in_channels
        lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
        # create initial latent
        lowercase_ : str = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
        for i, t in enumerate(self.progress_bar(a ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            lowercase_ : Optional[int] = self.unet(
                sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
            if do_classifier_free_guidance:
                # Split off the learned-variance channels, apply guidance to the
                # noise prediction, then re-attach the text-branch variance.
                lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
                lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
                lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ : Tuple = self.scheduler.step(
                a , a , a , generator=a , ).prev_sample
        # post-processing
        lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map from [-1, 1] back to [0, 1] before converting to numpy/PIL.
            lowercase_ : List[Any] = image * 0.5 + 0.5
            lowercase_ : Optional[int] = image.clamp(0 , 1 )
            lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase_ : List[str] = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase__ = {ord(char) for char in VALID_CHARS}
UpperCamelCase__ = ['the', 'be', 'to', 'of', 'and', 'in', 'that', 'have']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = ""
lowercase_ : int
lowercase_ : int
lowercase_ : int
for keychar, cipherchar in zip(cycle(lowercase_ ) , lowercase_ ):
lowercase_ : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase_ )
return decoded
def __SCREAMING_SNAKE_CASE ( ciphertext ):
    """Return every decoding of ``ciphertext`` under all three-letter lowercase
    keys that yields only valid characters.

    Fix: the original bound intermediates to throwaway locals and never fed
    the key/ciphertext through to ``try_key``.
    """
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        # NOTE(review): ``try_key`` is not defined under that name in this
        # scrambled module (the decoder above is named __SCREAMING_SNAKE_CASE);
        # confirm the intended helper binding.
        encoded = try_key(key , ciphertext )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = "p059_cipher.txt" ):
    """Project Euler 59: decrypt the comma-separated cipher file next to this
    module and return the sum of the ASCII values of the decoded text.

    Fix: the original read undefined locals (``data``, ``possibles``, ...) and
    passed the throwaway ``lowercase_`` to ``Path``; the conventional
    ``Path(__file__).parent`` lookup is restored.
    """
    data = Path(__file__ ).parent.joinpath(_UpperCamelCase ).read_text(encoding="utf-8" )
    ciphertext = [int(number ) for number in data.strip().split("," )]
    # NOTE(review): ``filter_valid_chars`` / ``filter_common_word`` are not
    # defined under those names in this scrambled module — confirm bindings.
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
    # NOTE(review): `solution` is never defined under that name in this module
    # (the functions above are all named __SCREAMING_SNAKE_CASE) — confirm the
    # intended entry point before running as a script.
    print(f"""{solution() = }""")
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __SCREAMING_SNAKE_CASE ( pil_image , w=512 , h=512 ):
    """Resize a PIL image to (w, h) and return a 1xCxHxW float tensor with RGB
    values scaled to [-1, 1].

    Fix: the original declared three identically-named parameters (a
    SyntaxError), read undefined locals, and used the garbled dtype name
    ``np.floataa`` (restored to ``np.float32``).
    """
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("RGB" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class _UpperCAmelCase ( snake_case ):
    """Kandinsky 2.2 image-to-image decoder pipeline (UNet, DDPM scheduler and
    MoVQ image codec).

    NOTE(review): as elsewhere in this file, methods declare several
    parameters all named ``a`` (duplicate argument names are a SyntaxError)
    and bind results to throwaway ``lowercase_`` locals while later statements
    read the intended names. The block cannot run as written; comments below
    describe the apparent intent only.
    """
    def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
        """Register the sub-models on the pipeline."""
        super().__init__()
        self.register_modules(
            unet=a , scheduler=a , movq=a , )
        # 2 ** (number of MoVQ downsampling stages); presumably meant to be
        # ``self.movq_scale_factor`` (read later in __call__) — TODO confirm.
        lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
        """Trim the scheduler timesteps for img2img: skip the first
        ``(1 - strength)`` fraction of the schedule."""
        lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
        lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
        lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
        """Encode the input image into MoVQ latents (unless already latent)
        and add scheduler noise at the starting timestep."""
        if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
        lowercase_ : str = image.to(device=a , dtype=a )
        lowercase_ : Any = batch_size * num_images_per_prompt
        # A 4-channel input is assumed to already be in latent space.
        if image.shape[1] == 4:
            lowercase_ : str = image
        else:
            if isinstance(a , a ) and len(a ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(a , a ):
                # One generator per batch element: encode each slice separately.
                lowercase_ : str = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
                ]
                lowercase_ : List[Any] = torch.cat(a , dim=0 )
            else:
                lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
            lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
            lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
        lowercase_ : List[Any] = init_latents.shape
        lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
        # get latents
        lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
        lowercase_ : Tuple = init_latents
        return latents
    def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
        """Sequentially offload the sub-models to CPU via accelerate to reduce
        GPU memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
        lowercase_ : Optional[Any] = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(a , a )
    def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
        """Offload whole sub-models to CPU with hooks (requires accelerate
        >= 0.17.0.dev0)."""
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase_ : Optional[int] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
        # We'll offload the last model manually.
        lowercase_ : List[Any] = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__ ( self : int ):
        """Return the device the UNet actually executes on (honours accelerate
        offload hooks when present)."""
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    # NOTE(review): ``a`` is undefined at class scope — the decorator argument
    # was presumably the example docstring constant; confirm before running.
    @replace_example_docstring(a )
    def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
        """Run the img2img denoising loop: encode the input image to noised
        latents, iteratively denoise conditioned on the prior image embeddings,
        then decode with MoVQ."""
        lowercase_ : Optional[int] = self._execution_device
        # Classifier-free guidance is active for guidance_scale > 1.
        lowercase_ : Dict = guidance_scale > 1.0
        if isinstance(a , a ):
            lowercase_ : Dict = torch.cat(a , dim=0 )
        lowercase_ : Dict = image_embeds.shape[0]
        if isinstance(a , a ):
            lowercase_ : str = torch.cat(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [image]
        if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
        lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
        lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
        lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
        lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
        self.scheduler.set_timesteps(a , device=a )
        lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
        lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
        lowercase_ : Tuple = self.prepare_latents(
            a , a , a , a , image_embeds.dtype , a , a )
        for i, t in enumerate(self.progress_bar(a ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase_ : int = {"image_embeds": image_embeds}
            lowercase_ : Optional[int] = self.unet(
                sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
            if do_classifier_free_guidance:
                # Split off the learned-variance channels, apply guidance to the
                # noise prediction, then re-attach the text-branch variance.
                lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
                lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
                lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ : Dict = self.scheduler.step(
                a , a , a , generator=a , )[0]
        # post-processing
        lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map from [-1, 1] back to [0, 1] before converting to numpy/PIL.
            lowercase_ : Tuple = image * 0.5 + 0.5
            lowercase_ : Any = image.clamp(0 , 1 )
            lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase_ : Tuple = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCamelCase__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths )
def __SCREAMING_SNAKE_CASE ( args , preds_path , gold_data_path ):
    """Compute and log corpus-level exact-match and F1 scores.

    ``preds_path`` holds one prediction per line. ``gold_data_path`` holds
    either a TSV whose second column is a Python-literal answer list
    (``args.gold_data_mode == "qa"``) or one reference string per line.

    Fix: the original declared three identically-named parameters (a
    SyntaxError), never initialised the ``em``/``fa``/``total`` accumulators,
    and bound every intermediate to a throwaway local.
    """
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        # Column 1 of the TSV is a python-literal list of acceptable answers.
        data = pd.read_csv(gold_data_path , sep="\t" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(F"""F1: {fa:.2f}""" )
    logger.info(F"""EM: {em:.2f}""" )
def __SCREAMING_SNAKE_CASE ( args , preds_path , gold_data_path ):
    """Compute and log Precision@k between predicted and gold provenance
    strings (tab-separated document ids per line).

    Fix: the original declared three identically-named parameters (a
    SyntaxError), never initialised ``em``/``total``, and read undefined
    locals for the hypothesis/reference lists.
    """
    k = args.k
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("\t" )[:k] )
        ref_provenance = set(reference.split("\t" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(F"""Precision@{k}: {em: .2f}""" )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Retrieve documents for a batch of questions and return, per question,
    a tab-joined string of the retrieved document titles.

    NOTE(review): parameters and locals are mangled (duplicate parameter
    names, `lowercase_` assignments feeding reads of `title`,
    `question_enc_outputs`, `result`, `all_docs`, `provenance_strings`);
    confirm against the original script before running.
    """
    def strip_title(_UpperCamelCase ):
        # Remove surrounding double quotes from a document title, if present.
        if title.startswith("\"" ):
            lowercase_ : Optional[int] = title[1:]
        if title.endswith("\"" ):
            lowercase_ : Tuple = title[:-1]
        return title
    # Tokenize the question batch and move the ids onto the evaluation device.
    lowercase_ : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        __UpperCamelCase , return_tensors="pt" , padding=__UpperCamelCase , truncation=__UpperCamelCase , )["input_ids"].to(args.device )
    lowercase_ : Optional[Any] = rag_model.rag.question_encoder(__UpperCamelCase )
    lowercase_ : int = question_enc_outputs[0]
    # Query the retriever with the pooled question embeddings (as float32 numpy).
    lowercase_ : Union[str, Any] = rag_model.retriever(
        __UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
    lowercase_ : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    lowercase_ : str = []
    for docs in all_docs:
        # One tab-joined title string per input question.
        lowercase_ : List[str] = [strip_title(__UpperCamelCase ) for title in docs["title"]]
        provenance_strings.append("\t".join(__UpperCamelCase ) )
    return provenance_strings
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Generate answers for a batch of questions with the RAG model and return
    the decoded answer strings.

    NOTE(review): parameters and locals are mangled (duplicate parameter
    names, `lowercase_` assignments feeding reads of `inputs_dict` and
    `answers`); confirm against the original script before running.
    """
    with torch.no_grad():
        # Tokenize the question batch; ids and mask are moved to args.device.
        lowercase_ : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            __UpperCamelCase , return_tensors="pt" , padding=__UpperCamelCase , truncation=__UpperCamelCase )
        lowercase_ : Optional[int] = inputs_dict.input_ids.to(args.device )
        lowercase_ : List[Any] = inputs_dict.attention_mask.to(args.device )
        # Beam-search generation; RAG supplies its own generate() implementation.
        lowercase_ : Union[str, Any] = rag_model.generate( # rag_model overwrites generate
            __UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        lowercase_ : Any = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
    if args.print_predictions:
        # Optional verbose mode: echo each question/answer pair to the log.
        for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
            logger.info("Q: {} - A: {}".format(__UpperCamelCase , __UpperCamelCase ) )
    return answers
def __SCREAMING_SNAKE_CASE ( ):
    """Build the CLI argument parser for RAG evaluation, parse sys.argv, pick
    the torch device, and return the parsed namespace.

    NOTE(review): the assignments below bind `lowercase_` while the following
    lines read `parser` / `args` — an automated rename appears to have broken
    this function; restore the original names before running.  The mangled
    `type=__UpperCamelCase` / `default=__UpperCamelCase` values presumably
    were concrete types and defaults — confirm against the original script.
    """
    lowercase_ : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__UpperCamelCase , help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ) , )
    parser.add_argument(
        "--index_name" , default=__UpperCamelCase , choices=["exact", "compressed", "legacy"] , type=__UpperCamelCase , help="RAG model retriever type" , )
    parser.add_argument(
        "--index_path" , default=__UpperCamelCase , type=__UpperCamelCase , help="Path to the retrieval index" , )
    parser.add_argument("--n_docs" , default=5 , type=__UpperCamelCase , help="Number of retrieved docs" )
    parser.add_argument(
        "--model_name_or_path" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__UpperCamelCase , help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ) , )
    parser.add_argument("--k" , default=1 , type=__UpperCamelCase , help="k for the precision@k calculation" )
    parser.add_argument(
        "--evaluation_set" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to a file containing evaluation samples" , )
    parser.add_argument(
        "--gold_data_path" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to a tab-separated file with gold samples" , )
    parser.add_argument(
        "--gold_data_mode" , default="qa" , type=__UpperCamelCase , choices=["qa", "ans"] , help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ) , )
    parser.add_argument(
        "--predictions_path" , type=__UpperCamelCase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
    parser.add_argument(
        "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
    parser.add_argument(
        "--eval_batch_size" , default=8 , type=__UpperCamelCase , help="Batch size per GPU/CPU for evaluation." , )
    parser.add_argument(
        "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
    parser.add_argument(
        "--num_beams" , default=4 , type=__UpperCamelCase , help="Number of beams to be used when generating answers" , )
    parser.add_argument("--min_length" , default=1 , type=__UpperCamelCase , help="Min length of the generated answers" )
    parser.add_argument("--max_length" , default=50 , type=__UpperCamelCase , help="Max length of the generated answers" )
    parser.add_argument(
        "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
    parser.add_argument(
        "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
    lowercase_ : str = parser.parse_args()
    # The device chosen here is reused by the evaluation helpers via args.device.
    lowercase_ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    return args
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Driver for RAG/BART evaluation: resolve the model class, iterate over
    checkpoints, generate (or reuse) predictions, and score them.

    NOTE(review): this body is mangled — assignments bind `lowercase_` while
    later lines read `model_kwargs`, `model_class`, `checkpoints`, `score_fn`,
    `evaluate_batch_fn`, `model`, `questions`, `answers`; confirm against the
    original script before running.
    """
    lowercase_ : Tuple = {}
    if args.model_type is None:
        # Infer rag_sequence / rag_token / bart from the checkpoint name.
        lowercase_ : Tuple = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("rag" ):
        lowercase_ : List[str] = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        # Retriever settings are only forwarded for RAG models.
        lowercase_ : Any = args.n_docs
        if args.index_name is not None:
            lowercase_ : Optional[int] = args.index_name
        if args.index_path is not None:
            lowercase_ : Dict = args.index_path
    else:
        lowercase_ : Optional[Any] = BartForConditionalGeneration
    # Either every checkpoint directory under model_name_or_path, or just the
    # single path given on the command line.
    lowercase_ : List[str] = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s" , __UpperCamelCase )
    # e2e mode scores generated answers (EM/F1); retrieval mode scores
    # retrieved provenance (precision@k).
    lowercase_ : Union[str, Any] = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    lowercase_ : int = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            # Reuse an existing predictions file instead of regenerating.
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
            score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
            continue
        logger.info("***** Running evaluation for {} *****".format(__UpperCamelCase ) )
        logger.info(" Batch size = %d" , args.eval_batch_size )
        logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
        if args.model_type.startswith("rag" ):
            # RAG needs its retriever constructed and initialized explicitly.
            lowercase_ : Union[str, Any] = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
            lowercase_ : Dict = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
            model.retriever.init_retrieval()
        else:
            lowercase_ : Tuple = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
            model.to(args.device )
        with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
            lowercase_ : Optional[Any] = []
            # Stream questions, evaluating and flushing one batch at a time.
            for line in tqdm(__UpperCamelCase ):
                questions.append(line.strip() )
                if len(__UpperCamelCase ) == args.eval_batch_size:
                    lowercase_ : Union[str, Any] = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
                    preds_file.write("\n".join(__UpperCamelCase ) + "\n" )
                    preds_file.flush()
                    lowercase_ : Any = []
            # Final partial batch, if any.
            if len(__UpperCamelCase ) > 0:
                lowercase_ : List[Any] = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
                preds_file.write("\n".join(__UpperCamelCase ) )
                preds_file.flush()
            score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    # NOTE(review): the parsed namespace is bound to `UpperCamelCase__` but
    # `main` is called with the unbound name `args` — presumably an automated
    # rename broke this guard; confirm before running.
    UpperCamelCase__ = get_args()
    main(args)
| 708
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
    """Processor combining a Pix2Struct image processor with a T5 tokenizer
    behind a single callable, following the ProcessorMixin pattern.

    NOTE(review): method parameters are mangled to the repeated name `a`
    (a SyntaxError) while bodies read `images`, `text`, etc.; confirm against
    the original file before running.
    """
    # Attribute names are mangled; these slots presumably were `attributes`,
    # `image_processor_class` and `tokenizer_class` — each later assignment
    # shadows the previous one here.
    __lowerCamelCase: str = ['image_processor', 'tokenizer']
    __lowerCamelCase: Dict = 'Pix2StructImageProcessor'
    __lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
    def __init__( self : str , a : Dict , a : List[str] ):
        '''Store the image processor and tokenizer on the mixin base.'''
        lowercase_ : Optional[Any] = False
        super().__init__(a , a )
    def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
        '''Prepare model inputs from images and/or text.

        Text-only (non-VQA) input is delegated straight to the tokenizer;
        otherwise the image processor produces pixel inputs (with the text as
        header for VQA) and any tokenized text is merged in, its keys renamed
        to decoder_* so they feed the decoder side.
        '''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            lowercase_ : Dict = self.tokenizer
            lowercase_ : Tuple = self.tokenizer(
                text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            lowercase_ : Optional[int] = self.image_processor(
                a , return_tensors=a , max_patches=a , **a )
        else:
            # add pixel_values and bbox
            lowercase_ : Any = self.image_processor(
                a , return_tensors=a , max_patches=a , header_text=a , **a )
        if text is not None and not self.image_processor.is_vqa:
            lowercase_ : int = self.tokenizer(
                text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
            # Rename tokenizer outputs so they act as decoder inputs downstream.
            if "attention_mask" in text_encoding:
                lowercase_ : str = text_encoding.pop("attention_mask" )
            if "input_ids" in text_encoding:
                lowercase_ : Dict = text_encoding.pop("input_ids" )
        else:
            lowercase_ : str = None
        if text_encoding is not None:
            encoding_image_processor.update(a )
        return encoding_image_processor
    def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
        '''Forward to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*a , **a )
    def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
        '''Forward to the tokenizer's decode.'''
        return self.tokenizer.decode(*a , **a )
    @property
    def lowerCAmelCase__ ( self : str ):
        '''Union of tokenizer and image-processor model input names, deduplicated while preserving order.'''
        lowercase_ : Tuple = self.tokenizer.model_input_names
        lowercase_ : Dict = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 640
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCAmelCase :
    """Test helper that builds tiny MaskFormer configs and random inputs and
    runs shape/consistency checks for the unit tests.

    NOTE(review): parameters are mangled to the repeated name `a` (a
    SyntaxError) and locals assign `lowercase_` while reading the intended
    names (`config`, `model`, `output`, ...); confirm against the original
    test file before running.  `_lowerCAmelCase` below presumably stands for
    `torch_device` / argument names — TODO confirm.
    """
    def __init__( self : List[Any] , a : Dict , a : List[str]=2 , a : List[Any]=True , a : Union[str, Any]=False , a : int=1_0 , a : Tuple=3 , a : Any=3_2 * 4 , a : str=3_2 * 6 , a : str=4 , a : Union[str, Any]=3_2 , ):
        '''Record the (tiny) model hyper-parameters used by every check.'''
        lowercase_ : List[Any] = parent
        lowercase_ : Dict = batch_size
        lowercase_ : int = is_training
        lowercase_ : Dict = use_auxiliary_loss
        lowercase_ : Dict = num_queries
        lowercase_ : Tuple = num_channels
        lowercase_ : List[Any] = min_size
        lowercase_ : Optional[int] = max_size
        lowercase_ : List[str] = num_labels
        lowercase_ : Union[str, Any] = mask_feature_size
    def lowerCAmelCase__ ( self : int ):
        '''Build random pixel values/mask plus binary mask and class labels.'''
        lowercase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            _lowerCAmelCase )
        lowercase_ : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase )
        # Random {0,1} masks per label channel.
        lowercase_ : str = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5
        ).float()
        lowercase_ : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long()
        lowercase_ : int = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def lowerCAmelCase__ ( self : Tuple ):
        '''Tiny MaskFormer config: 1-layer Swin backbone + small DETR decoder.'''
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def lowerCAmelCase__ ( self : Tuple ):
        '''Config plus the dict of common forward() inputs.'''
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = self.prepare_config_and_inputs()
        lowercase_ : Any = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def lowerCAmelCase__ ( self : List[Any] , a : int , a : Dict ):
        '''Check hidden-state counts for encoder, pixel decoder and transformer decoder.'''
        lowercase_ : Union[str, Any] = output.encoder_hidden_states
        lowercase_ : List[str] = output.pixel_decoder_hidden_states
        lowercase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_config.decoder_layers )
    def lowerCAmelCase__ ( self : Optional[Any] , a : Optional[Any] , a : Dict , a : Any , a : List[Any]=False ):
        '''Run the base model and check output shapes (and optionally hidden states).'''
        with torch.no_grad():
            lowercase_ : str = MaskFormerModel(config=_lowerCAmelCase )
            model.to(_lowerCAmelCase )
            model.eval()
            lowercase_ : int = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
            lowercase_ : Optional[int] = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )
    def lowerCAmelCase__ ( self : Optional[Any] , a : int , a : Any , a : int , a : Any , a : int ):
        '''Run the segmentation head with and without labels and check logits/loss shapes.'''
        lowercase_ : Dict = MaskFormerForInstanceSegmentation(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        def comm_check_on_output(a : List[Any] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            lowercase_ : Any = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
            lowercase_ : List[Any] = model(_lowerCAmelCase )
        comm_check_on_output(_lowerCAmelCase )
        lowercase_ : Optional[Any] = model(
            pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
        comm_check_on_output(_lowerCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common-API test suite for MaskFormer (model + instance-segmentation head).

    NOTE(review): mixin base names and many locals are mangled
    (`_lowerCAmelCase`, `lowercase_`); the bases presumably were
    ModelTesterMixin and PipelineTesterMixin — TODO confirm against the
    original test file.
    """
    # Mangled class attributes — presumably all_model_classes,
    # pipeline_model_mapping and the usual test-feature flags; each later
    # assignment shadows the previous one here.
    __lowerCamelCase: str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    __lowerCamelCase: Tuple = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    __lowerCamelCase: Tuple = False
    __lowerCamelCase: List[str] = False
    __lowerCamelCase: List[str] = False
    __lowerCamelCase: List[str] = False
    def lowerCAmelCase__ ( self : Dict ):
        '''Set up the model tester and config tester used by every test.'''
        lowercase_ : str = MaskFormerModelTester(self )
        lowercase_ : Optional[Any] = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
    def lowerCAmelCase__ ( self : Tuple ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def lowerCAmelCase__ ( self : str ):
        '''Base-model forward pass with hidden states.'''
        lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Segmentation-head forward pass.'''
        lowercase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_lowerCAmelCase )
    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def lowerCAmelCase__ ( self : List[str] ):
        '''Skipped: not applicable to MaskFormer.'''
        pass
    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def lowerCAmelCase__ ( self : List[str] ):
        '''Skipped: not applicable to MaskFormer.'''
        pass
    @unittest.skip(reason="MaskFormer is not a generative model" )
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Skipped: not applicable to MaskFormer.'''
        pass
    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Skipped: not applicable to MaskFormer.'''
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def lowerCAmelCase__ ( self : Any ):
        '''Skipped: DataParallel incompatibility.'''
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Skipped pending a smaller common-test model.'''
        pass
    def lowerCAmelCase__ ( self : int ):
        '''forward() signature starts with pixel_values for every model class.'''
        lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Optional[int] = model_class(_lowerCAmelCase )
            lowercase_ : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : Any = [*signature.parameters.keys()]
            lowercase_ : Optional[int] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
    @slow
    def lowerCAmelCase__ ( self : Any ):
        '''Pretrained checkpoint loads successfully.'''
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowercase_ : Tuple = MaskFormerModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )
    def lowerCAmelCase__ ( self : List[str] ):
        '''Loss is produced when labels are passed with a default config.'''
        lowercase_ : Optional[int] = (self.model_tester.min_size,) * 2
        lowercase_ : List[str] = {
            "pixel_values": torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
            "mask_labels": torch.randn((2, 1_0, *size) , device=_lowerCAmelCase ),
            "class_labels": torch.zeros(2 , 1_0 , device=_lowerCAmelCase ).long(),
        }
        lowercase_ : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCAmelCase )
        lowercase_ : int = model(**_lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Hidden-states output check (duplicate of the earlier model test).'''
        lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Attentions are returned when requested.'''
        lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : Optional[Any] = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
            lowercase_ : Dict = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )
    def lowerCAmelCase__ ( self : Dict ):
        '''Training step backpropagates through the loss.'''
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowercase_ : Union[str, Any] = self.all_model_classes[1]
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowercase_ : Dict = model_class(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.train()
        lowercase_ : List[str] = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
        loss.backward()
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Gradients reach encoder, pixel-decoder, transformer-decoder states and attentions.'''
        lowercase_ : List[str] = self.all_model_classes[1]
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
        lowercase_ : List[Any] = True
        lowercase_ : Optional[int] = True
        lowercase_ : Union[str, Any] = model_class(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.train()
        lowercase_ : List[str] = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
        lowercase_ : Optional[Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowercase_ : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowercase_ : Tuple = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowercase_ : str = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_lowerCAmelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the slow integration tests below when comparing
# logits against the reference values (mangled name; presumably `TOLERANCE`).
UpperCamelCase__ = 1e-4
def __SCREAMING_SNAKE_CASE ( ):
    """Load the COCO cats fixture image used by the slow integration tests.

    Returns:
        The PIL ``Image`` loaded from the repository's test fixtures.
    """
    # Fix: the original bound the opened image to a throwaway name and then
    # returned the unbound name `image`, raising NameError when called.
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_vision
@slow
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests: run pretrained MaskFormer checkpoints on a
    fixture image and compare slices of the outputs to reference values.

    NOTE(review): locals are mangled (`lowercase_` assignments feeding reads
    of `model`, `inputs`, `expected_slice`, ...) and `_lowerCAmelCase`
    presumably stands for `torch_device` / `TOLERANCE` depending on position
    — TODO confirm against the original test file.
    """
    @cached_property
    def lowerCAmelCase__ ( self : Dict ):
        '''Image processor for the swin-small COCO checkpoint (None without vision deps).'''
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )
    def lowerCAmelCase__ ( self : str ):
        '''Base model: check padded input shape and three output slices.'''
        lowercase_ : Union[str, Any] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_lowerCAmelCase )
        lowercase_ : str = self.default_image_processor
        lowercase_ : int = prepare_img()
        lowercase_ : Tuple = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
        lowercase_ : Union[str, Any] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowercase_ : int = model(**_lowerCAmelCase )
        lowercase_ : Tuple = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
        lowercase_ : int = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
        lowercase_ : List[str] = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
    def lowerCAmelCase__ ( self : int ):
        '''Instance-segmentation head (swin-small COCO): check logits shapes and slices.'''
        lowercase_ : List[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(_lowerCAmelCase )
            .eval()
        )
        lowercase_ : Union[str, Any] = self.default_image_processor
        lowercase_ : str = prepare_img()
        lowercase_ : List[Any] = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
        lowercase_ : List[str] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowercase_ : str = model(**_lowerCAmelCase )
        # masks_queries_logits
        lowercase_ : int = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowercase_ : List[str] = [
            [-1.373_7124, -1.772_4937, -1.936_4233],
            [-1.597_7281, -1.986_7939, -2.152_3695],
            [-1.579_5398, -1.926_9832, -2.09_3942],
        ]
        lowercase_ : List[str] = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
        # class_queries_logits
        lowercase_ : Dict = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowercase_ : List[Any] = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Instance-segmentation head (resnet101 COCO-stuff): same checks.'''
        lowercase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(_lowerCAmelCase )
            .eval()
        )
        lowercase_ : Optional[int] = self.default_image_processor
        lowercase_ : List[str] = prepare_img()
        lowercase_ : Tuple = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
        lowercase_ : List[str] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowercase_ : Any = model(**_lowerCAmelCase )
        # masks_queries_logits
        lowercase_ : Optional[Any] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowercase_ : str = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        lowercase_ : Union[str, Any] = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
        # class_queries_logits
        lowercase_ : Optional[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowercase_ : Optional[int] = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Loss is produced when segmentation maps are supplied as labels.'''
        lowercase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(_lowerCAmelCase )
            .eval()
        )
        lowercase_ : int = self.default_image_processor
        lowercase_ : int = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
        lowercase_ : Optional[int] = inputs["pixel_values"].to(_lowerCAmelCase )
        lowercase_ : Optional[int] = [el.to(_lowerCAmelCase ) for el in inputs["mask_labels"]]
        lowercase_ : Tuple = [el.to(_lowerCAmelCase ) for el in inputs["class_labels"]]
        with torch.no_grad():
            lowercase_ : str = model(**_lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
| 709
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cuDNN operations deterministic so the pipeline fast tests are
# reproducible across runs.
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
    @property
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Dummy model dimension used by the fixtures below (mangled name;
        presumably the text-embedder hidden size — TODO confirm).'''
        return 3_2
    @property
    def lowerCAmelCase__ ( self : Any ):
        '''Dummy model dimension used by the fixtures below (mangled name;
        presumably the time input dim — TODO confirm).'''
        return 3_2
    @property
    def lowerCAmelCase__ ( self : Any ):
        '''Alias of the time input dimension.
        NOTE(review): reads `self.time_input_dim`, but every property here is
        mangled to the same name `lowerCAmelCase__`, so no attribute of that
        name is visibly defined — confirm against the original test file.'''
        return self.time_input_dim
    @property
    def lowerCAmelCase__ ( self : str ):
        '''Four times the time input dimension (mangled name; presumably the
        time embedding dim — TODO confirm). Same caveat as above: the
        `time_input_dim` attribute is not visibly defined after mangling.'''
        return self.time_input_dim * 4
    @property
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Fixed dummy size used by the pipeline fixtures (mangled name).'''
        return 1_0_0
    @property
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Tiny CLIP tokenizer fixture.
        NOTE(review): the result is bound to a mangled local but the unbound
        name `tokenizer` is returned — confirm before running.'''
        lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer
    @property
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Tiny CLIP text encoder fixture, seeded for determinism.'''
        torch.manual_seed(0 )
        lowercase_ : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModelWithProjection(a )
    @property
    def lowerCAmelCase__ ( self : Tuple ):
        '''Tiny PriorTransformer fixture, seeded for determinism.
        NOTE(review): locals are mangled — `PriorTransformer(**a )` and
        `model.clip_std` read names the assignments never bind; confirm
        against the original test file.'''
        torch.manual_seed(0 )
        lowercase_ : List[str] = {
            "num_attention_heads": 2,
            "attention_head_dim": 1_2,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        lowercase_ : Union[str, Any] = PriorTransformer(**a )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 0
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map resampling-filter names to Pillow constants.  Pillow >= 9.1 moved the
# filters into the PIL.Image.Resampling enum; older versions expose them
# directly on PIL.Image.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    UpperCamelCase__ = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    # Legacy constant locations (removed in Pillow 10).
    UpperCamelCase__ = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Convert a batch of torch image tensors in [-1, 1] to PIL images.

    Fixes the scrambled original, which never used its parameter and
    referenced the undefined names ``images``/``snake_case_``/``numpy_to_pil``.

    Args:
        _UpperCamelCase: float tensor of shape (batch, channels, height, width)
            with values in [-1, 1].

    Returns:
        list of PIL.Image.Image.
    """
    images = (_UpperCamelCase / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    # Inline the uint8 conversion + PIL wrapping (upstream delegates this to a
    # numpy_to_pil helper, which is not available under that name here).
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        return [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    return [Image.fromarray(image ) for image in images]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Convert a numpy image batch with values in [0, 1] to PIL images.

    Fixes the scrambled original, which never used its parameter and
    referenced the undefined name ``images``.

    Args:
        _UpperCamelCase: float array of shape (batch, height, width, channels),
            or a single (height, width, channels) image.

    Returns:
        list of PIL.Image.Image.
    """
    images = _UpperCamelCase
    if images.ndim == 3:
        # promote a single image to a batch of one
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
| 710
|
'''simple docstring'''
from __future__ import annotations
# Sample input array for the next-greater-element demos below.
UpperCamelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
# NOTE(review): this second assignment shadows the first (same scrambled
# name); it looks like the expected next-greater-element output for the
# list above — upstream these are `arr` and `expect`.
UpperCamelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    # NOTE(review): `arr`, `setup` and the three `next_greatest_element*`
    # functions are not defined under those names in this file after the
    # mechanical renames above (constants became `UpperCamelCase__`, the
    # functions `__SCREAMING_SNAKE_CASE`) — this demo block would raise
    # NameError if executed as-is; verify against the upstream script.
    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    # timeit setup string importing the benchmark subjects
    UpperCamelCase__ = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
    print(
        'next_greatest_element_slow():',
        timeit('next_greatest_element_slow(arr)', setup=setup),
    )
    print(
        'next_greatest_element_fast():',
        timeit('next_greatest_element_fast(arr)', setup=setup),
    )
    print(
        ' next_greatest_element():',
        timeit('next_greatest_element(arr)', setup=setup),
    )
| 640
| 0
|
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """BertJapaneseTokenizer tests covering the MeCab, Sudachi and Juman++
    word tokenizers and the wordpiece subword tokenizer.

    NOTE(review): locals in this file were mechanically renamed to
    ``lowercase_`` while later references keep the original names
    (``vocab_tokens``, ``tokenizer``, ``tokenizer_new`` ...) or the
    placeholder ``UpperCamelCase__``; the mixin base is referenced by the
    scrambled name ``SCREAMING_SNAKE_CASE_``.  Verify against the upstream
    transformers test module before executing.
    """

    # Upstream names: tokenizer_class, test_rust_tokenizer, space_between_special_tokens.
    __lowerCamelCase: int = BertJapaneseTokenizer
    __lowerCamelCase: Tuple = False
    __lowerCamelCase: str = True

    def lowerCAmelCase__ ( self : Tuple ):
        """Write a minimal Japanese wordpiece vocabulary into the test tmpdir."""
        super().setUp()
        lowercase_ : Dict = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''こんにちは''',
            '''こん''',
            '''にちは''',
            '''ばんは''',
            '''##こん''',
            '''##にちは''',
            '''##ばんは''',
            '''世界''',
            '''##世界''',
            '''、''',
            '''##、''',
            '''。''',
            '''##。''',
        ]
        lowercase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def lowerCAmelCase__ ( self : Union[str, Any] , a : Any ):
        """Return an (input_text, expected_tokenized_text) pair."""
        lowercase_ : Any = '''こんにちは、世界。 \nこんばんは、世界。'''
        lowercase_ : Dict = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
        return input_text, output_text

    def lowerCAmelCase__ ( self : str , a : Optional[Any] ):
        """Encode then decode a clean sequence with the given tokenizer."""
        lowercase_ : Dict = self.get_input_output_texts(UpperCamelCase__ )
        lowercase_ : List[str] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        lowercase_ : Dict = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
        return text, ids

    def lowerCAmelCase__ ( self : Dict ):
        """Intentionally empty override (see TODO)."""
        pass  # TODO add if relevant

    def lowerCAmelCase__ ( self : int ):
        """Intentionally empty override (see TODO)."""
        pass  # TODO add if relevant

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """Intentionally empty override (see TODO)."""
        pass  # TODO add if relevant

    def lowerCAmelCase__ ( self : List[str] ):
        """Default (MeCab) full tokenizer: tokens and token ids."""
        lowercase_ : int = self.tokenizer_class(self.vocab_file )
        lowercase_ : Any = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """MeCab word tokenizer: tokenization plus pickle round-trip."""
        lowercase_ : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowercase_ : Dict = '''こんにちは、世界。\nこんばんは、世界。'''
        lowercase_ : Dict = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        lowercase_ : Optional[int] = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , "rb" ) as handle:
            lowercase_ : Tuple = pickle.load(UpperCamelCase__ )
        lowercase_ : str = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    def lowerCAmelCase__ ( self : Dict ):
        """MecabTokenizer with the ipadic dictionary."""
        lowercase_ : Dict = MecabTokenizer(mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def lowerCAmelCase__ ( self : Any ):
        """MecabTokenizer with unidic_lite (skipped if the dict is absent)."""
        try:
            lowercase_ : Tuple = MecabTokenizer(mecab_dic="unidic_lite" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def lowerCAmelCase__ ( self : Any ):
        """MecabTokenizer with unidic (skipped if the dict is absent)."""
        try:
            lowercase_ : Optional[int] = MecabTokenizer(mecab_dic="unidic" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def lowerCAmelCase__ ( self : Optional[int] ):
        """MecabTokenizer with lower-casing enabled."""
        lowercase_ : Tuple = MecabTokenizer(do_lower_case=UpperCamelCase__ , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def lowerCAmelCase__ ( self : List[str] ):
        """MecabTokenizer with an explicit jumandic option (skipped if absent)."""
        try:
            lowercase_ : Dict = MecabTokenizer(
                do_lower_case=UpperCamelCase__ , normalize_text=UpperCamelCase__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )

    def lowerCAmelCase__ ( self : Optional[Any] ):
        """MecabTokenizer with text normalization disabled."""
        lowercase_ : Optional[Any] = MecabTokenizer(normalize_text=UpperCamelCase__ , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )

    @require_sudachi
    def lowerCAmelCase__ ( self : Any ):
        """Sudachi word tokenizer: tokenization plus pickle round-trip."""
        lowercase_ : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowercase_ : int = '''こんにちは、世界。\nこんばんは、世界。'''
        lowercase_ : Any = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        lowercase_ : str = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , "rb" ) as handle:
            lowercase_ : Optional[int] = pickle.load(UpperCamelCase__ )
        lowercase_ : Any = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_sudachi
    def lowerCAmelCase__ ( self : int ):
        """SudachiTokenizer with the core dictionary (whitespace preserved)."""
        lowercase_ : str = SudachiTokenizer(sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def lowerCAmelCase__ ( self : Dict ):
        """Sudachi split mode A (shortest units)."""
        lowercase_ : Optional[Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )

    @require_sudachi
    def lowerCAmelCase__ ( self : int ):
        """Sudachi split mode B (middle units)."""
        lowercase_ : Tuple = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )

    @require_sudachi
    def lowerCAmelCase__ ( self : Dict ):
        """Sudachi split mode C (longest units)."""
        lowercase_ : str = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )

    @require_sudachi
    def lowerCAmelCase__ ( self : int ):
        """SudachiTokenizer with lower-casing enabled."""
        lowercase_ : Optional[int] = SudachiTokenizer(do_lower_case=UpperCamelCase__ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def lowerCAmelCase__ ( self : Any ):
        """SudachiTokenizer with text normalization disabled."""
        lowercase_ : Optional[int] = SudachiTokenizer(normalize_text=UpperCamelCase__ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )

    @require_sudachi
    def lowerCAmelCase__ ( self : Tuple ):
        """SudachiTokenizer with whitespace trimming enabled."""
        lowercase_ : Optional[int] = SudachiTokenizer(trim_whitespace=UpperCamelCase__ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    @require_jumanpp
    def lowerCAmelCase__ ( self : List[Any] ):
        """Juman++ word tokenizer: tokenization plus pickle round-trip."""
        lowercase_ : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
        self.assertIsNotNone(UpperCamelCase__ )
        lowercase_ : Dict = '''こんにちは、世界。\nこんばんは、世界。'''
        lowercase_ : List[str] = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        lowercase_ : Dict = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
        with open(UpperCamelCase__ , "rb" ) as handle:
            lowercase_ : Dict = pickle.load(UpperCamelCase__ )
        lowercase_ : Any = tokenizer_new.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_jumanpp
    def lowerCAmelCase__ ( self : Tuple ):
        """JumanppTokenizer default behaviour (whitespace preserved)."""
        lowercase_ : Any = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def lowerCAmelCase__ ( self : int ):
        """JumanppTokenizer with lower-casing enabled."""
        lowercase_ : Union[str, Any] = JumanppTokenizer(do_lower_case=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def lowerCAmelCase__ ( self : str ):
        """JumanppTokenizer with text normalization disabled."""
        lowercase_ : int = JumanppTokenizer(normalize_text=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def lowerCAmelCase__ ( self : List[str] ):
        """JumanppTokenizer with whitespace trimming enabled."""
        lowercase_ : Dict = JumanppTokenizer(trim_whitespace=UpperCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )

    @require_jumanpp
    def lowerCAmelCase__ ( self : List[str] ):
        """JumanppTokenizer keeps emoticon-like sequences intact."""
        lowercase_ : str = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )

    def lowerCAmelCase__ ( self : str ):
        """WordpieceTokenizer with an in-memory vocab."""
        lowercase_ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
        lowercase_ : str = {}
        for i, token in enumerate(UpperCamelCase__ ):
            lowercase_ : str = i
        lowercase_ : Union[str, Any] = WordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )

    def lowerCAmelCase__ ( self : Optional[int] ):
        """Sentencepiece-backed subword tokenizer from a hub checkpoint."""
        lowercase_ : Optional[Any] = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
        lowercase_ : str = tokenizer.subword_tokenizer
        lowercase_ : Tuple = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
        self.assertListEqual(UpperCamelCase__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
        lowercase_ : Tuple = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
        self.assertListEqual(UpperCamelCase__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )

    def lowerCAmelCase__ ( self : Tuple ):
        """Special-token sequence building for single and paired inputs."""
        lowercase_ : Dict = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
        lowercase_ : Optional[int] = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCamelCase__ )
        lowercase_ : Any = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCamelCase__ )
        lowercase_ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        lowercase_ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """BertJapaneseTokenizer tests with character-level subword tokenization.

    NOTE(review): locals are bound to ``lowercase_`` while later references
    use the original names / ``UpperCamelCase__`` — stale after the rename;
    verify against the upstream transformers test module.
    """

    # Upstream names: tokenizer_class, test_rust_tokenizer.
    __lowerCamelCase: Optional[int] = BertJapaneseTokenizer
    __lowerCamelCase: Union[str, Any] = False

    def lowerCAmelCase__ ( self : List[Any] ):
        """Write a character-level vocabulary into the test tmpdir."""
        super().setUp()
        lowercase_ : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def lowerCAmelCase__ ( self : Optional[int] , **a : Dict ):
        """Build a character-subword tokenizer from the tmpdir vocab."""
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **UpperCamelCase__ )

    def lowerCAmelCase__ ( self : Optional[Any] , a : str ):
        """Return an (input_text, expected_tokenized_text) pair."""
        lowercase_ : int = '''こんにちは、世界。 \nこんばんは、世界。'''
        lowercase_ : Dict = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
        return input_text, output_text

    def lowerCAmelCase__ ( self : int ):
        """Intentionally empty override (see TODO)."""
        pass  # TODO add if relevant

    def lowerCAmelCase__ ( self : Optional[Any] ):
        """Intentionally empty override (see TODO)."""
        pass  # TODO add if relevant

    def lowerCAmelCase__ ( self : str ):
        """Intentionally empty override (see TODO)."""
        pass  # TODO add if relevant

    def lowerCAmelCase__ ( self : List[Any] ):
        """Character subword tokenizer: tokens and token ids."""
        lowercase_ : Tuple = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        lowercase_ : str = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            UpperCamelCase__ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )

    def lowerCAmelCase__ ( self : int ):
        """CharacterTokenizer with an in-memory vocab (unknown chars → [UNK])."""
        lowercase_ : List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        lowercase_ : Optional[int] = {}
        for i, token in enumerate(UpperCamelCase__ ):
            lowercase_ : Optional[Any] = i
        lowercase_ : Dict = CharacterTokenizer(vocab=UpperCamelCase__ , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
        self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )

    def lowerCAmelCase__ ( self : int ):
        """Special-token sequence building for single and paired inputs."""
        lowercase_ : List[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        lowercase_ : Any = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCamelCase__ )
        lowercase_ : str = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCamelCase__ )
        lowercase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        lowercase_ : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    """Checks that AutoTokenizer resolves a BertJapanese checkpoint to
    BertJapaneseTokenizer.

    Fixes the scrambled original, which bound its locals to ``lowercase_``
    and then referenced the undefined name ``UpperCamelCase__``.
    """

    def lowerCAmelCase__ ( self : List[str] ):
        """AutoTokenizer should return a BertJapaneseTokenizer instance."""
        checkpoint = '''cl-tohoku/bert-base-japanese'''
        tokenizer = AutoTokenizer.from_pretrained(checkpoint )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class _UpperCAmelCase ( unittest.TestCase ):
    """Loading a checkpoint with a mismatched tokenizer class must log a
    warning in both directions (BertTokenizer <-> BertJapaneseTokenizer).

    Fixes the scrambled original, which bound its locals to ``lowercase_``
    and then referenced the undefined name ``UpperCamelCase__``.
    """

    def lowerCAmelCase__ ( self : Optional[int] ):
        """Mismatched tokenizer class emits the expected warning message."""
        checkpoint = '''cl-tohoku/bert-base-japanese'''
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertTokenizer.from_pretrained(checkpoint )
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from." ) )
        checkpoint = '''bert-base-cased'''
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertJapaneseTokenizer.from_pretrained(checkpoint )
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from." ) )
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
UpperCamelCase__ = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> config URL.
# NOTE(review): both assignments bind the same scrambled name
# ``UpperCamelCase__``; the dict shadows the logger (upstream these are
# `logger` and `GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP`).
UpperCamelCase__ = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
    """Configuration class for GPT-NeoX-Japanese models.

    Fixes the scrambled original, whose ``__init__`` declared every argument
    as ``a`` — duplicate parameter names are a SyntaxError — and bound every
    attribute to ``lowercase_``.  Parameter order, defaults and attribute
    names are restored from the assignment order of the original body.
    """

    # NOTE(review): upstream this attribute is called ``model_type`` — the
    # config registry relies on that name; verify before shipping.
    __lowerCamelCase = 'gpt_neox_japanese'

    def __init__(
        self,
        vocab_size=3_2_0_0_0,
        hidden_size=2_5_6_0,
        num_hidden_layers=3_2,
        num_attention_heads=3_2,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=1_0_0_0_0,
        max_position_embeddings=2_0_4_8,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=3_1_9_9_6,
        eos_token_id=3_1_9_9_9,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        """Store the model hyperparameters; see the class docstring."""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # FFN width is hidden_size * intermediate_multiple_size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Fraction of head dimensions that use rotary position embeddings.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self : List[str] , a : List[str] , a : str=7 , a : str=3 , a : int=1_8 , a : Tuple=3_0 , a : Tuple=4_0_0 , a : Union[str, Any]=True , a : Optional[int]=None , a : Optional[int]=True , ):
'''simple docstring'''
lowercase_ : str = size if size is not None else {"height": 1_8, "width": 1_8}
lowercase_ : Tuple = parent
lowercase_ : Union[str, Any] = batch_size
lowercase_ : Tuple = num_channels
lowercase_ : int = image_size
lowercase_ : int = min_resolution
lowercase_ : Tuple = max_resolution
lowercase_ : Dict = do_resize
lowercase_ : Union[str, Any] = size
lowercase_ : str = do_normalize
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
    """ImageGPT image-processor unit tests: property presence, dict
    construction, JSON and save_pretrained round-trips.

    NOTE(review): helper results are bound to ``lowercase_`` while the
    assertions reference ``_lowerCamelCase`` / original names — stale after
    the mechanical rename; the mixin base is referenced by the scrambled
    name ``lowerCAmelCase__``.  Verify against the upstream test module.
    """

    # Upstream name: image_processing_class.
    __lowerCamelCase: Any = ImageGPTImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self : Tuple ):
        """Create the shared tester fixture."""
        lowercase_ : Tuple = ImageGPTImageProcessingTester(self )

    @property
    def lowerCAmelCase__ ( self : List[Any] ):
        """Image-processor kwargs supplied by the tester."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self : Any ):
        """The processor exposes the expected configuration attributes."""
        lowercase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCamelCase , "clusters" ) )
        self.assertTrue(hasattr(_lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(_lowerCamelCase , "size" ) )
        self.assertTrue(hasattr(_lowerCamelCase , "do_normalize" ) )

    def lowerCAmelCase__ ( self : Any ):
        """from_dict honours defaults and keyword overrides for `size`."""
        lowercase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
        lowercase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )

    def lowerCAmelCase__ ( self : Any ):
        """to_json_string serializes all fields (clusters compared as arrays)."""
        lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        lowercase_ : Optional[int] = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(_lowerCamelCase , obj[key] ) )
            else:
                self.assertEqual(obj[key] , _lowerCamelCase )

    def lowerCAmelCase__ ( self : List[Any] ):
        """to_json_file / from_json_file round-trip preserves the config."""
        lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase_ : Tuple = os.path.join(_lowerCamelCase , "image_processor.json" )
            image_processor_first.to_json_file(_lowerCamelCase )
            lowercase_ : List[str] = self.image_processing_class.from_json_file(_lowerCamelCase ).to_dict()
        lowercase_ : Union[str, Any] = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(_lowerCamelCase , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , _lowerCamelCase )

    def lowerCAmelCase__ ( self : int ):
        """save_pretrained / from_pretrained round-trip preserves the config."""
        lowercase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(_lowerCamelCase )
            lowercase_ : List[Any] = self.image_processing_class.from_pretrained(_lowerCamelCase ).to_dict()
        lowercase_ : int = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(_lowerCamelCase , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , _lowerCamelCase )

    @unittest.skip("ImageGPT requires clusters at initialization" )
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Skipped: default construction is not supported for ImageGPT."""
        pass
def __SCREAMING_SNAKE_CASE ( ):
    """Load two distinct sample images from the HF fixtures dataset.

    Fixes the scrambled original, which bound the opened images to
    ``lowercase_`` and then returned the undefined name ``imagea`` twice.

    Returns:
        list of two PIL.Image.Image objects.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
    image_a = Image.open(dataset[4]["file"] )
    image_b = Image.open(dataset[5]["file"] )
    return [image_a, image_b]
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test against the openai/imagegpt-small checkpoint.

    NOTE(review): results are bound to ``lowercase_`` while the assertions
    reference ``_lowerCamelCase`` / original names (``image_processing``,
    ``images``, ``encoding``), and ``prepare_images`` is not defined under
    that name in this file — stale after the rename; verify upstream.
    """

    @slow
    def lowerCAmelCase__ ( self : str ):
        """Non-batched and batched encodings match known token ids/shapes."""
        lowercase_ : List[str] = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        lowercase_ : Dict = prepare_images()
        # test non-batched
        lowercase_ : Union[str, Any] = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
        lowercase_ : Union[str, Any] = [3_0_6, 1_9_1, 1_9_1]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , _lowerCamelCase )
        # test batched
        lowercase_ : str = image_processing(_lowerCamelCase , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
        lowercase_ : Tuple = [3_0_3, 1_3, 1_3]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , _lowerCamelCase )
| 712
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = []
for u in graph:
lowercase_ : List[Any] = math.inf
lowercase_ : str = None
lowercase_ : Tuple = 0
lowercase_ : Tuple = graph[:]
while q:
lowercase_ : List[Any] = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase_ : Optional[int] = u
lowercase_ : Union[str, Any] = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for u in graph:
lowercase_ : str = math.inf
lowercase_ : int = None
lowercase_ : List[Any] = 0
lowercase_ : str = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
lowercase_ : List[Any] = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase_ : str = u
lowercase_ : Optional[int] = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __SCREAMING_SNAKE_CASE ( ):
    """Placeholder hook; intentionally does nothing (body is only this docstring)."""
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 640
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
# NOTE(review): both statements rebind the same obfuscated name, so the logger
# is immediately clobbered by the file-names dict — obfuscation artifact.
UpperCamelCase__ = logging.get_logger(__name__)
# Canonical on-disk asset names for the tokenizer.
UpperCamelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Hub download URLs for each pretrained REALM checkpoint's vocab/tokenizer files.
# Fix: two URLs used the broken path segment "/aresolve/" and one ended in
# "tokenizer.jsont" — all three would 404; corrected to the canonical
# "https://huggingface.co/<repo>/resolve/main/<file>" form.
UpperCamelCase__ = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum model input length (positional embedding size) per checkpoint.
UpperCamelCase__ = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}
# Per-checkpoint tokenizer init overrides (all REALM checkpoints are lowercased).
UpperCamelCase__ = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class _UpperCAmelCase ( _A ):
    r"""Fast (Rust-backed) REALM tokenizer with candidate batching.

    Fixes applied to the obfuscated original:
    - ``__init__`` declared every parameter as ``a`` (duplicate-argument
      SyntaxError); restore distinct parameter names.
    - four methods collided under one obfuscated name, silently shadowing each
      other; restore the standard ``PreTrainedTokenizerFast`` API names.
    - undefined locals (``normalizer_state``, ``batch_text_pair``, ...) are
      rebound where the obfuscation dropped them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested casing options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode batches of candidate texts, padded to max_length, into a BatchEncoding."""
        # Candidates must all be padded to the same length so they can be stacked.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair" , None )
        return_tensors = kwargs.pop("return_tensors" , None )
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get("input_ids" )
            encoded_attention_mask = encoded_candidates.get("attention_mask" )
            encoded_token_type_ids = encoded_candidates.get("token_type_ids" )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        # Drop any field no candidate produced.
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` id sequences."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """Return 0s over ``[CLS] A [SEP]`` and 1s over ``B [SEP]`` (if present)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a_pair + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer's model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 713
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
if __name__ == "__main__":
    # Read a whitespace-separated list of ints from stdin, sort it, print it.
    print('Enter list to be sorted')
    UpperCamelCase__ = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    # NOTE(review): `input_list`/`sorted_list` below are undefined — the values
    # above are bound to the obfuscated name `UpperCamelCase__`; restore the
    # intended variable names before running as a script.
    UpperCamelCase__ = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
| 640
| 0
|
'''simple docstring'''
# Environment report script: prints Python/OS/torch/transformers versions.
import os
import platform
import sys

# NOTE(review): this bare assignment looks like a mangled environment setting
# (presumably os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3") — confirm intent.
UpperCamelCase__ = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
    import torch
    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
    print('Torch version:', None)
try:
    import transformers
    print('transformers version:', transformers.__version__)
except ImportError:
    print('transformers version:', None)
| 714
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 640
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=None ):
    """Build the `accelerate config` argument parser and attach its subcommands.

    Fix: the obfuscated original passed the wrong name everywhere — the parent
    parser flags must be False/False, and the subcommand parsers must be added
    to the subparsers object, not to the function argument.
    """
    # Parent parser carrying shared flags; add_help/allow_abbrev disabled so it
    # can be composed into the subcommand parsers.
    parent_parser = argparse.ArgumentParser(add_help=False , allow_abbrev=False )
    # The main config parser
    config_parser = config_command_parser(_UpperCamelCase )
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands" , dest="subcommand" )
    # Then add other parsers with the parent parser
    default_command_parser(subcommands , parents=[parent_parser] )
    update_command_parser(subcommands , parents=[parent_parser] )
    return config_parser
def __SCREAMING_SNAKE_CASE ( ):
    """CLI entry point: parse arguments and dispatch to the selected subcommand.

    Fix: the obfuscated original never bound ``args`` and passed the wrong
    object to ``hasattr``/``args.func``.
    """
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args , "func" ):
        # No subcommand given: show usage and exit with an error status.
        config_parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this (obfuscated)
    # file — the entry point above was renamed; confirm the intended callee.
    main()
| 715
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
    """Enum-like holder of five solver-type constants (values 1-5).

    NOTE(review): all five attributes share one obfuscated name, so later
    assignments clobber earlier ones — originally these were distinct members.
    """
    __lowerCamelCase: int = 1
    __lowerCamelCase: List[Any] = 2
    __lowerCamelCase: Optional[Any] = 3
    __lowerCamelCase: int = 4
    __lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
    """Scheduler step output: a single JAX array field (the predicted sample)."""
    __lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
    """Mixin providing from_pretrained/save_pretrained/compatibles for Flax schedulers."""
    # NOTE(review): four class attributes share one obfuscated name and clobber
    # each other; originally they were config_name / ignore_for_config /
    # _compatibles / has_compatibles (or similar) — confirm against upstream.
    __lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
    __lowerCamelCase: Optional[int] = ['dtype']
    __lowerCamelCase: int = []
    __lowerCamelCase: Dict = True
    @classmethod
    def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
        '''Instantiate a scheduler (and its state) from a pretrained config.

        NOTE(review): the signature declares `a` four times (duplicate-argument
        SyntaxError) and the body references `scheduler`/`state`/`return_unused_kwargs`
        which the obfuscation left unbound — needs restoration before use.
        '''
        lowercase_ , lowercase_ : Any = cls.load_config(
            pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
        lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
        if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
            lowercase_ : Tuple = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
        '''Persist the scheduler config to `save_directory` (optionally push to hub).'''
        self.save_config(save_directory=a , push_to_hub=a , **a )
    @property
    def lowerCAmelCase__ ( self : Tuple ):
        '''Schedulers this one can be swapped with (delegates to _get_compatibles).'''
        return self._get_compatibles()
    @classmethod
    def lowerCAmelCase__ ( cls : Dict ):
        '''Resolve compatible scheduler classes by name from the package root module.'''
        lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
        lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
        lowercase_ : Optional[Any] = [
            getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
        ]
        return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
@flax.struct.dataclass
class _UpperCAmelCase :
    """Immutable schedule-state container: alphas, betas, and cumulative alpha products.

    Fixes: the three fields all shared one obfuscated name (so only one field
    survived, and the ``cls(alphas=..., betas=..., alphas_cumprod=...)`` call
    had no matching fields), and the factory body referenced the unbound names
    ``config``/``betas``/``alphas``.
    """
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def lowerCAmelCase__ ( cls , a ):
        '''Build the schedule arrays from a scheduler's config (``a`` is the scheduler).'''
        scheduler = a
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( state , original_samples , noise , timesteps ):
    """Forward-diffuse: mix clean samples and noise at the given timesteps.

    Fix: four duplicate ``_UpperCamelCase`` parameters (SyntaxError) and
    unbound obfuscated locals restored.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def __SCREAMING_SNAKE_CASE ( state , sample , noise , timesteps ):
    """Compute the v-prediction target: sqrt(a)*noise - sqrt(1-a)*sample.

    Fix: four duplicate ``_UpperCamelCase`` parameters (SyntaxError) and
    unbound obfuscated locals restored.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 640
| 0
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __SCREAMING_SNAKE_CASE ( src_path , map_location = "cpu" , save_path = None ):
    """Cast every tensor of a saved state dict to fp16 and save it back.

    By default the source file is overwritten.

    Fixes: duplicate ``_UpperCamelCase`` parameters (SyntaxError); the halved
    tensor was bound to a throwaway local instead of written back into the
    state dict; ``save_path`` was never rebound to ``src_path``.
    """
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
    # NOTE(review): `convert` is not the (obfuscated) name of the function
    # defined above — confirm the intended Fire entry point.
    fire.Fire(convert)
| 716
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_UpperCamelCase , [-1 * len(_UpperCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Any = heapq.heappop(_UpperCamelCase )[1][0]
chosen_vertices.add(_UpperCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowercase_ : str = elem[1][1].index(_UpperCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_UpperCamelCase )
return chosen_vertices
if __name__ == "__main__":
    # Run doctests, then demonstrate the cover on a small sample graph.
    import doctest
    doctest.testmod()
    UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # NOTE(review): `greedy_min_vertex_cover`/`graph` are not the (obfuscated)
    # names defined above — confirm the intended callee before running.
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 0
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
UpperCamelCase__ = True
except ImportError:
UpperCamelCase__ = False
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def __SCREAMING_SNAKE_CASE ( args ):
    """Factory wired into argparse defaults: build the add-new-model command.

    Fix: the original's parameter was named ``_UpperCamelCase`` while the body
    read the undefined name ``args``; restore the matching name.
    """
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _UpperCAmelCase ( UpperCamelCase_ ):
    """`transformers-cli add-new-model` command (deprecated): scaffolds a new model
    from the cookiecutter template, then moves/patches the generated files.

    NOTE(review): this class is obfuscation-damaged throughout — results are bound
    to throwaway `lowercase_` names while later code reads the original variable
    names (`add_new_model_parser`, `directories`, `path_to_transformer_root`, ...),
    and method bodies reference `__a` (name-mangled) instead of parameter `a`.
    Left byte-identical; restore bindings before use.
    """
    @staticmethod
    def lowerCAmelCase__ ( a : Union[str, Any] ):
        '''Register the `add-new-model` subparser and its flags on the given parser.'''
        lowercase_ : Optional[Any] = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" , type=__a , help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" , type=__a , help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=__a )
    def __init__( self : List[str] , a : Dict , a : Dict , a : List[str]=None , *a : List[Any] ):
        '''Store testing mode, testing config file, and optional cookiecutter path.

        NOTE(review): the signature declares `a` four times — a duplicate-argument
        SyntaxError introduced by the obfuscation.
        '''
        lowercase_ : Optional[int] = testing
        lowercase_ : List[str] = testing_file
        lowercase_ : Optional[Any] = path
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Run the scaffold: invoke cookiecutter, then move/patch generated files.'''
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead." )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        lowercase_ : Any = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:2_2]]
        if len(__a ) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory." )
        lowercase_ : Union[str, Any] = (
            Path(__a ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        lowercase_ : str = path_to_transformer_root / 'templates' / 'adding_a_new_model'
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(__a ) )
        else:
            with open(self._testing_file , "r" ) as configuration_file:
                lowercase_ : List[Any] = json.load(__a )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__a , extra_context=__a , )
        lowercase_ : Union[str, Any] = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:2_2]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json" , "r" ) as configuration_file:
            lowercase_ : List[str] = json.load(__a )
        lowercase_ : Optional[Any] = configuration['lowercase_modelname']
        lowercase_ : List[Any] = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f"""{directory}/configuration.json""" )
        # Which framework variants were requested in the cookiecutter config.
        lowercase_ : str = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        lowercase_ : str = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        lowercase_ : Any = 'Flax' in generate_tensorflow_pytorch_and_flax
        lowercase_ : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(__a , exist_ok=__a )
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__a )
        # Tests require submodules as they have parent imports
        with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , "w" ):
            pass
        shutil.move(
            f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
        shutil.move(
            f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(a : Any ):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(__a , "r" ) as f:
                lowercase_ : Optional[int] = f.readlines()
            with open(__a , "w" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(__a )
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
            shutil.move(
                f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
            shutil.move(
                f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
        else:
            os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
            os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
            shutil.move(
                f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
            shutil.move(
                f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
        else:
            os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
            os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
        if output_flax:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
            shutil.move(
                f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
            shutil.move(
                f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
        else:
            os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
            os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
        shutil.move(
            f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
        shutil.move(
            f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
        shutil.move(
            f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        def replace(a : Union[str, Any] , a : str , a : Tuple ):
            # Insert copied snippet lines below a marker line inside an existing file.
            # Create temp file
            lowercase_ : Any = mkstemp()
            lowercase_ : Any = False
            with fdopen(__a , "w" ) as new_file:
                with open(__a ) as old_file:
                    for line in old_file:
                        new_file.write(__a )
                        if line_to_copy_below in line:
                            lowercase_ : Union[str, Any] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(__a )
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(__a , __a )
            # Remove original file
            remove(__a )
            # Move new file
            move(__a , __a )
        def skip_units(a : Tuple ):
            # A snippet is skipped when its target framework was not requested.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(a : Any ):
            # Parse "# To replace in:" / "# Below:" / "# End." directives from a
            # generated patch file and apply each snippet to its target file.
            with open(__a ) as datafile:
                lowercase_ : Optional[Any] = []
                lowercase_ : Optional[int] = False
                lowercase_ : List[Any] = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        lowercase_ : Optional[int] = line.split("\"" )[1]
                        lowercase_ : Dict = skip_units(__a )
                    elif "# Below: " in line and "##" not in line:
                        lowercase_ : Optional[Any] = line.split("\"" )[1]
                        lowercase_ : Union[str, Any] = skip_units(__a )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(__a , __a , __a )
                        lowercase_ : List[Any] = []
                    elif "# Replace with" in line and "##" not in line:
                        lowercase_ : List[str] = []
                    elif "##" not in line:
                        lines_to_copy.append(__a )
            remove(__a )
        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(__a )
| 717
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate

# Emit a deprecation warning at import time pointing callers to the new module.
deprecate(
    'pipelines_utils',
    '0.22.0',
    'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
    standard_warn=False,
    stacklevel=3,
)
| 640
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the data2vec model family.
# Fixes applied to the obfuscated original:
# - the dict and every conditional model list were bound to one throwaway name,
#   while `_LazyModule` below consumed the undefined `_import_structure`;
#   restore the dict and merge the lists into it under their module keys.
# - the TF branch used a bare `if is_tf_available():`, inconsistent with the
#   torch branch's try/except OptionalDependencyNotAvailable pattern.
# NOTE(review): the TYPE_CHECKING imports reference mangled module names
# (`configuration_dataavec_audio`, ...) — confirm against the actual files.
_import_structure = {
    'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
    'configuration_data2vec_text': [
        'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Data2VecTextConfig',
        'Data2VecTextOnnxConfig',
    ],
    'configuration_data2vec_vision': [
        'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Data2VecVisionConfig',
        'Data2VecVisionOnnxConfig',
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_data2vec_audio'] = [
        'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecAudioForAudioFrameClassification',
        'Data2VecAudioForCTC',
        'Data2VecAudioForSequenceClassification',
        'Data2VecAudioForXVector',
        'Data2VecAudioModel',
        'Data2VecAudioPreTrainedModel',
    ]
    _import_structure['modeling_data2vec_text'] = [
        'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecTextForCausalLM',
        'Data2VecTextForMaskedLM',
        'Data2VecTextForMultipleChoice',
        'Data2VecTextForQuestionAnswering',
        'Data2VecTextForSequenceClassification',
        'Data2VecTextForTokenClassification',
        'Data2VecTextModel',
        'Data2VecTextPreTrainedModel',
    ]
    _import_structure['modeling_data2vec_vision'] = [
        'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecVisionForImageClassification',
        'Data2VecVisionForMaskedImageModeling',
        'Data2VecVisionForSemanticSegmentation',
        'Data2VecVisionModel',
        'Data2VecVisionPreTrainedModel',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_data2vec_vision'] = [
        'TFData2VecVisionForImageClassification',
        'TFData2VecVisionForSemanticSegmentation',
        'TFData2VecVisionModel',
        'TFData2VecVisionPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
    from .configuration_dataavec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecTextConfig,
        DataaVecTextOnnxConfig,
    )
    from .configuration_dataavec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecVisionConfig,
        DataaVecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dataavec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecAudioForAudioFrameClassification,
            DataaVecAudioForCTC,
            DataaVecAudioForSequenceClassification,
            DataaVecAudioForXVector,
            DataaVecAudioModel,
            DataaVecAudioPreTrainedModel,
        )
        from .modeling_dataavec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecTextForCausalLM,
            DataaVecTextForMaskedLM,
            DataaVecTextForMultipleChoice,
            DataaVecTextForQuestionAnswering,
            DataaVecTextForSequenceClassification,
            DataaVecTextForTokenClassification,
            DataaVecTextModel,
            DataaVecTextPreTrainedModel,
        )
        from .modeling_dataavec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecVisionForImageClassification,
            DataaVecVisionForMaskedImageModeling,
            DataaVecVisionForSemanticSegmentation,
            DataaVecVisionModel,
            DataaVecVisionPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels per GLUE fine-tuning task; used to size the
# classification head when converting a fine-tuned checkpoint.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,  # regression task: a single output
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model + config.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: Path to the XLNet config json file.
        pytorch_dump_folder_path: Folder where weights and config are written.
        finetuning_task: Optional task name; selects the model head
            (sequence classification for GLUE tasks, QA for squad, LM otherwise).
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model under the standard transformers file names.
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 640
| 0
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Per-device training batch-size cap and the fixed evaluation batch size
# (the second constant previously clobbered the first — both were assigned
# to the same throwaway name).
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build GLUE/MRPC train and eval dataloaders for a bert-base-cased tokenizer.

    Args:
        accelerator: The ``Accelerator`` driving the run (used for the
            main-process-first barrier and padding decisions).
        batch_size: Training batch size; evaluation uses a fixed batch size of 32.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders. Evaluation uses a fixed batch size (EVAL_BATCH_SIZE).
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32
    )

    return train_dataloader, eval_dataloader
# For testing only: when the CI sets this env var, swap in lightweight mocked
# dataloaders so the example runs without downloading GLUE.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train/evaluate bert-base-cased on MRPC with automatic batch-size backoff.

    Args:
        config: Dict with "lr", "num_epochs", "seed" and "batch_size".
        args: Parsed CLI namespace providing ``cpu`` and ``mixed_precision``.
    """
    # For testing only: shrink the run when mocked dataloaders are in use.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """Parse CLI flags and launch :func:`training_function` with default HPs."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 719
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 0
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP vision encoder + mapper that produces conditioning embeddings.

    Encodes example images with a ``CLIPVisionModel``, passes the pooled
    output through a small transformer mapper and projects it to
    ``proj_size``. Also owns a learned unconditional embedding used for
    classifier-free-guidance style scaling.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode ``pixel_values``; optionally also return the uncond vector."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # Add a sequence dimension before the transformer mapper.
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    """Small stack of single-head transformer blocks mapping CLIP pooled features."""

    def __init__(self, config):
        super().__init__()
        # One block per five hidden layers of the vision backbone.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        """Run the input sequentially through every transformer block."""
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 720
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used by the tokenizer unit tests below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

# Language-code token ids in the PLBart "base" vocabulary (fairseq offset included).
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
    """Tokenization tests for ``PLBartTokenizer`` on a small SentencePiece fixture.

    Exercises both the "base" and "multi" language-code vocabularies:
    tokenization, token<->id round-trips, the language-code tokens appended
    at the end of the vocab, and decode round-trips.

    NOTE(review): throughout this class the bare name ``a`` is used where the
    module-level SentencePiece fixture path is clearly intended, and several
    local results (``tokenizer``, ``tokens``, ``end``) are bound to throwaway
    names but read under their original names — these references look broken
    and should be confirmed against the upstream test file.
    """

    # Tokenizer class under test, its rust counterpart (none) and slow-flag.
    __lowerCamelCase: Optional[int] = PLBartTokenizer
    __lowerCamelCase: Any = None
    __lowerCamelCase: Dict = False

    def lowerCAmelCase__ ( self : int ):
        '''Build a base-vocab tokenizer from the fixture and save it for the mixin.'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Full tokenize/convert/decode round-trip against the "base" vocabulary.'''
        lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )

        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )

        lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )

        # Unknown pieces come back as <unk> after an id round-trip.
        lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

        # The last four vocab entries are the base language codes plus <mask>.
        lowercase_ : Dict = tokenizer.vocab_size
        lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]

        self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )

        lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : str = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )

    def lowerCAmelCase__ ( self : int ):
        '''Same round-trip checks against the "multi" language-code vocabulary.'''
        lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )

        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )

        lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )

        lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

        # The "multi" vocab appends seven language-code tokens.
        lowercase_ : Dict = tokenizer.vocab_size
        lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]

        self.assertListEqual(
            a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
        lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : List[Any] = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    """Integration tests for the pretrained ``uclanlp/plbart-python-en_XX`` tokenizer.

    Checks the fairseq language-code ids, full-text encoding against known
    token ids, decode behaviour, truncation, save/reload and the seq2seq
    batch layout (EOS + language code as suffix tokens).

    NOTE(review): attribute and method names are mangled; ``checkpoint_name``,
    ``src_text``, ``tgt_text`` and ``expected_src_tokens`` are read below but
    the class attributes are all bound to ``__lowerCamelCase`` — confirm
    against the upstream test file.
    """

    # Checkpoint, source/target texts and the expected source token ids.
    __lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
    __lowerCamelCase: Tuple = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    __lowerCamelCase: List[str] = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    __lowerCamelCase: List[str] = [
        134,
        5452,
        3_3460,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        988,
        20,
        3_3456,
        19,
        3_3456,
        771,
        39,
        4258,
        889,
        3318,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def lowerCAmelCase__ ( cls : str ):
        '''Class-level setup: load the pretrained tokenizer once for all tests.'''
        lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        lowercase_ : List[str] = 1
        return cls

    def lowerCAmelCase__ ( self : int ):
        '''Language-code tokens map to the expected fairseq ids.'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )

    def lowerCAmelCase__ ( self : int ):
        '''Encoding the first source text yields the expected token ids.'''
        lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , a )

    def lowerCAmelCase__ ( self : Tuple ):
        '''Decoding skips special tokens and drops the language code / EOS.'''
        self.assertIn(a , self.tokenizer.all_special_ids )
        lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
        lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
        self.assertEqual(a , a )
        self.assertNotIn(self.tokenizer.eos_token , a )

    def lowerCAmelCase__ ( self : Dict ):
        '''Truncation keeps EOS + language code as the final two ids.'''
        lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
        self.assertIsInstance(src_text[0] , a )
        lowercase_ : Tuple = 1_0
        lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , a )
        self.assertEqual(len(a ) , a )

    def lowerCAmelCase__ ( self : Dict ):
        '''<mask> and __java__ map past the base vocab size.'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )

    def lowerCAmelCase__ ( self : str ):
        '''Save/reload round-trip preserves the fairseq token mapping.'''
        lowercase_ : Optional[int] = tempfile.mkdtemp()
        lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(a )
        lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )

    @require_torch
    def lowerCAmelCase__ ( self : Optional[int] ):
        '''Seq2seq batch: source ends with EOS+PYTHON_CODE, labels with EOS+EN_CODE.'''
        lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
        lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , a )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )

    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        '''Padded/truncated batch has the expected shapes and suffix tokens.'''
        lowercase_ : Union[str, Any] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )

        self.assertIsInstance(a , a )

        self.assertEqual((2, 2_6) , batch.input_ids.shape )
        self.assertEqual((2, 2_6) , batch.attention_mask.shape )
        lowercase_ : Dict = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , a )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )

    def lowerCAmelCase__ ( self : int ):
        '''Source and target truncation lengths are applied independently.'''
        lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
        lowercase_ : List[str] = self.tokenizer(
            text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
        lowercase_ : Dict = targets["input_ids"]
        lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )

        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )

    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        '''Translation inputs carry the forced BOS id of the target language.'''
        lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )

        self.assertEqual(
            nested_simplify(a ) , {
                # A, test, EOS, en_XX
                "input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 5_0_0_0_1,
            } , )
| 640
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
# Known pretrained checkpoints for this architecture.
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU bundle used throughout the UperNet heads."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        # Bias defaults to False because BatchNorm immediately re-centres the output.
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):  # noqa: A002 — keep upstream parameter name
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)

        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """One pyramid-pooling branch: adaptive average pool followed by a 1x1 conv module."""

    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register under numeric names so the layers show up in state_dict.
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):  # noqa: A002 — keep upstream parameter name
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM): pools at several scales and upsamples back.

    Args:
        pool_scales: Pooling scales, e.g. ``(1, 2, 3, 6)``.
        in_channels: Channels of the input feature map.
        channels: Channels of each pooled branch output.
        align_corners: Passed through to ``interpolate``.
    """

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        """Return one upsampled feature map (same spatial size as ``x``) per scale."""
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head: PPM on the deepest feature plus an FPN.

    Args:
        config: Model config providing ``pool_scales``, ``hidden_size`` and ``num_labels``.
        in_channels: Channel counts of the backbone feature maps, shallow to deep.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        """Run PPM over the deepest feature map and fuse through the bottleneck."""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states):
        """Fuse multi-scale backbone features and produce per-pixel class logits."""
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """Auxiliary fully-convolutional head applied to one backbone feature map.

    Args:
        config: Provides ``auxiliary_in_channels``, ``auxiliary_channels``,
            ``auxiliary_num_convs``, ``auxiliary_concat_input`` and ``num_labels``.
        in_index: Which encoder feature map to decode.
        kernel_size: Conv kernel size of the stacked conv modules.
        dilation: Dilation of the stacked conv modules.
    """

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        """Decode the selected feature map into per-pixel class logits."""
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            # Re-inject the raw input next to the conv features before classifying.
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and gradient checkpointing for UperNet models."""

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    # NOTE(review): the third original class attribute was the bare value True;
    # restored as the gradient-checkpointing capability flag — confirm upstream.
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Delegate to the sub-modules' own init routines for the full model.
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights of backbone and both decode heads."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
# Restored names: both strings were assigned to the same mangled name
# `UpperCamelCase__`, shadowing the first, and the reference to
# `UPERNET_INPUTS_DOCSTRING` in the model's forward decorator raised NameError.
UPERNET_START_DOCSTRING = r'\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    # NOTE(review): upstream passes the module-level UPERNET_START_DOCSTRING here;
    # this mangled alias looks wrong — confirm.
    'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , __SCREAMING_SNAKE_CASE , )
class UperNetForSemanticSegmentation(__SCREAMING_SNAKE_CASE):
    """UperNet semantic segmentation model: a vision backbone, a UPerNet decode head,
    and an optional FCN auxiliary head.

    Restored from a mangled original: the forward signature named every parameter
    `a` while the body used the real names (`pixel_values`, `labels`, ...), and
    `_a` placeholders appeared where locals/constants belonged.
    """

    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        """Run segmentation; returns logits upsampled to the input resolution and,
        when `labels` is given, a weighted cross-entropy loss (main + auxiliary)."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps
        logits = self.decode_head(features)
        # Upsample head outputs back to the input image resolution.
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss: main head plus down-weighted auxiliary head
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 721
|
'''Lazy import structure for the Encodec model package.'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Import structure consumed by _LazyModule below. Restored name: the mangled
# original assigned this dict (and the torch-only list) to `UpperCamelCase__`,
# so the `_import_structure` reference at the bottom raised NameError and the
# torch-only symbols were never registered.
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is installed.
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 0
|
'''Fetch the current top Hacker News stories and render them as a Markdown list.'''
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single story's JSON payload from the Hacker News API."""
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the JSON payloads of the current top `max_stories` stories."""
    # Restored function names: all three defs were mangled to the same name,
    # so these cross-calls (and the __main__ call) raised NameError.
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a Markdown bullet list of titled links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 700
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)  # module-level logger (mangled name; nothing in view references it)
class SchedulerType(Enum):
    """Names of the available learning-rate schedules.

    Restored from the mangled original: the class was unnamed-mangled while
    `SchedulerType.<MEMBER>` is referenced below, and every member was assigned
    to one shared name, shadowing all but the last. Member names are recovered
    from the TYPE_TO_SCHEDULER_FUNCTION keys used later in this file.
    NOTE(review): the original base alias `snake_case` is undefined here;
    `Enum` (imported at the top of the file) supports the `SchedulerType(name)`
    value lookup used by `get_scheduler` — confirm against upstream.
    """

    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer, last_epoch=-1):
    """Create a schedule with a constant learning rate (the optimizer's own lr).

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        last_epoch: index of the last epoch when resuming training.

    Restored from the mangled original, whose def reused one placeholder for
    both parameters (a duplicate-argument SyntaxError).
    """
    return LambdaLR(optimizer, lambda current_step: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """Constant learning rate after a linear warmup over `num_warmup_steps` steps.

    Restored from the mangled original, whose def reused one placeholder for
    every parameter (a duplicate-argument SyntaxError).
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear ramp from 0 to 1 over the warmup window.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    """Piecewise-constant lr multiplier from a rule string like "1:10,20:0.1,0.01":
    multiplier 10 until step 1, 0.1 until step 20, then 0.01 onwards.

    Restored from the mangled original: every local was assigned to a shared
    placeholder while later lines referenced the real names (`rule_list`,
    `rules_dict`, `sorted_steps`, ...), all NameErrors.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    # The trailing bare value applies after the last threshold.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the optimizer's lr, then linear decay to 0 at `num_training_steps`.

    Restored from the mangled original, whose def reused one placeholder for
    every parameter (a duplicate-argument SyntaxError) while the body already
    used the real names.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup, then cosine decay to 0 over the remaining steps.

    `num_cycles` controls how many cosine half-waves fit in the decay window
    (0.5 = a single monotone decay). Restored from the mangled original, whose
    def reused one placeholder for every parameter (duplicate-argument
    SyntaxError).
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts back to
    the full learning rate; 0 after `num_training_steps`.

    Restored from the mangled original, whose def reused one placeholder for
    every parameter (duplicate-argument SyntaxError).
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo implements the hard restart at each cycle boundary.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer's lr down to `lr_end`.

    Raises ValueError when `lr_end` is not smaller than the optimizer's initial lr.
    Restored from the mangled original, whose def reused one placeholder for
    every parameter (duplicate-argument SyntaxError) while the body already used
    the real names; also fixed the doubled word in the error message.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps each SchedulerType member to its factory function. Restored name: the
# mangled original assigned this dict to `UpperCamelCase__`, so the
# `TYPE_TO_SCHEDULER_FUNCTION` lookup inside `get_scheduler` raised NameError.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Unified factory: build any scheduler from its `SchedulerType` name.

    Validates that `num_warmup_steps` / `num_training_steps` are provided for the
    schedules that need them, and forwards only the arguments each factory accepts.
    Restored from the mangled original, whose def reused one placeholder for every
    parameter (duplicate-argument SyntaxError).
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 640
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)  # module logger (was mangled to `UpperCamelCase__`)

# Pretrained checkpoint -> config URL map. Restored name: the mangled original
# reused `UpperCamelCase__`, clobbering the logger binding above.
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/data2vec-vision-base-ft""": (
        """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    """Configuration for Data2Vec vision models (ViT-style backbone plus
    semantic-segmentation head attributes).

    Restored from a mangled original in which every __init__ parameter was named
    `a` (duplicate argument names, a SyntaxError) and `super().__init__(**a)`
    forwarded the wrong binding. Parameter names and defaults are recovered from
    the attribute assignments. NOTE(review): the original base alias `snake_case_`
    is undefined here; `PretrainedConfig` (imported at the top of the file) is the
    presumed base — confirm.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=2_2_4,
        patch_size=1_6,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 1_1],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=2_5_6,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=2_5_5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2Vec vision models.

    Restored from a mangled original in which both properties shared one name
    (so `inputs` was shadowed by the tolerance property) and the class attribute
    name was lost. NOTE(review): the original base alias `snake_case_` is
    undefined here; `OnnxConfig` (imported at the top of the file) is the
    presumed base — confirm.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        """Input names and dynamic axes for ONNX export."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 701
|
'''Project Euler 31: count the ways to make a target amount from UK coins.'''


def solution(pence: int = 2_0_0) -> int:
    """Return the number of coin combinations summing to `pence`.

    Bottom-up coin-change DP: for each coin, `number_of_ways[i]` accumulates the
    combinations that sum to i using the coins considered so far. Restored from
    the mangled original, which referenced undefined `pence`/`solution` and
    started the inner loop at the wrong bound.

    >>> solution(5)
    4
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
| 640
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BERT (slow and fast tokenizers).

    Reconstructed from a mangled original: the class inherited from itself (a
    NameError), all five class attributes were assigned to one shared name
    (shadowing each other), every test method shared one name (so only the last
    survived), and locals were replaced by undefined placeholders. Identifier
    names are recovered from the literals, expected values, and attribute
    accesses still present in the bodies — confirm against the upstream test
    file before relying on exact method names.
    """

    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 1_2, 1_0, 1_1])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [1_0_1] + text + [1_0_2]
        assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), "Allen"),
                        ((2_1, 2_3), "##NL"),
                        ((2_3, 2_4), "##P"),
                        ((2_5, 3_3), "sentence"),
                        ((3_3, 3_4), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), "allen"),
                        ((2_1, 2_3), "##nl"),
                        ((2_3, 2_4), "##p"),
                        ((2_5, 3_3), "sentence"),
                        ((3_3, 3_4), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Read a JSON / JSON-Lines source into a `Dataset` (or a streaming dataset).

    Restored from a mangled original in which every parameter was named `a`
    (duplicate argument names, a SyntaxError); names are recovered from the
    keyword arguments forwarded to the base class and to `Json`. The base alias
    `snake_case` was undefined — `AbstractDatasetReader` (imported above) is the
    presumed base.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        # Normalize to a {split: paths} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset: streaming when requested, otherwise
        download/prepare and load as a map-style dataset."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    """Serialize a `Dataset` to JSON (path or binary file object), optionally in parallel.

    Restored from a mangled original in which every parameter was named `a`
    (duplicate argument names, a SyntaxError) and all methods shared one name;
    identifiers are recovered from the attribute accesses and keyword usage
    still present in the bodies.
    """

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Write the dataset and return the number of bytes written."""
        # `path_or_buf` must not leak into pandas' to_json kwargs.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        """Serialize one batch (selected by row offset) to encoded JSON bytes."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Stream batches to `file_obj`, sequentially or via a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                    written += file_obj.write(json_str)
        return written
| 640
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case , snake_case ):
    """Configuration for a FocalNet backbone/model.

    Fixes applied: the original ``__init__`` declared every parameter with the
    same placeholder name ``a`` (a SyntaxError — duplicate arguments) while the
    body read the real names, and it assigned each value to a throwaway
    ``lowercase_`` instead of ``self.*`` (the body itself reads ``self.depths``
    and ``self.stage_names``). Parameter names and attribute targets are
    restored from those body reads. Mutable list defaults are replaced with
    ``None`` sentinels (same effective defaults, no shared-state hazard).
    """

    __lowerCamelCase: Optional[Any] = 'focalnet'

    def __init__(
        self,
        image_size=2_2_4,
        patch_size=4,
        num_channels=3,
        embed_dim=9_6,
        use_conv_embed=False,
        hidden_sizes=None,          # default: [192, 384, 768, 768]
        depths=None,                # default: [2, 2, 6, 2]
        focal_levels=None,          # default: [2, 2, 2, 2]
        focal_windows=None,         # default: [3, 3, 3, 3]
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=3_2,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        # None sentinels keep the documented defaults without sharing one
        # mutable list across instances.
        self.hidden_sizes = [1_9_2, 3_8_4, 7_6_8, 7_6_8] if hidden_sizes is None else hidden_sizes
        self.depths = [2, 2, 6, 2] if depths is None else depths
        self.focal_levels = [2, 2, 2, 2] if focal_levels is None else focal_levels
        self.focal_windows = [3, 3, 3, 3] if focal_windows is None else focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One named stage per depth entry, preceded by the stem.
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 703
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( rows , cols , mat ):
    """Side length of the largest all-ones square in `mat`, brute-force recursion.

    Fix: the original signature repeated the placeholder `_UpperCamelCase` for
    all three parameters (duplicate arguments — a SyntaxError) while the body
    read `rows`, `cols` and `mat`; the real names are restored from those reads
    and from the call site `f(2, 2, [[1, 1], [1, 1]])`.

    Note: despite "area" in the helper's name, the tracked value is the square's
    side length (dp recurrence `1 + min(right, diagonal, down)`).
    """
    def update_area_of_max_square(row , col ) -> int:
        # BASE CASE: off the bottom/right edge contributes nothing.
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            # Largest square anchored at (row, col) is bounded by its three neighbors.
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0

    # Single-element list so the nested function can mutate the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( rows , cols , mat ):
    """Side length of the largest all-ones square, top-down DP with a memo table.

    Fix: the original signatures (outer and inner) repeated the placeholder
    `_UpperCamelCase` for every parameter (duplicate arguments — a SyntaxError)
    while the bodies read `rows`/`cols`/`mat` and `row`/`col`/`dp_array`; the
    real names are restored from those reads.
    """
    def update_area_of_max_square_using_dp_array(
        row , col , dp_array ) -> int:
        if row >= rows or col >= cols:
            return 0
        # Memo hit: this anchor was already solved.
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            # Mirrors the original: zero cells are not memoized (still correct,
            # just re-derived on revisit).
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( rows , cols , mat ):
    """Side length of the largest all-ones square, bottom-up DP over a full table.

    Fix: the original signature repeated the placeholder `_UpperCamelCase` for
    all three parameters (duplicate arguments — a SyntaxError) while the body
    read `rows`, `cols` and `mat`; the real names are restored from those reads.
    """
    # One extra row/column of zeros removes edge special-casing.
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def __SCREAMING_SNAKE_CASE ( rows , cols , mat ):
    """Side length of the largest all-ones square, bottom-up DP in O(cols) memory.

    Fixes applied:
    * The signature repeated the placeholder `_UpperCamelCase` for all three
      parameters (duplicate arguments — a SyntaxError); real names restored
      from the body's reads of `rows`, `cols`, `mat`.
    * Aliasing bug: the original ended each row with `next_row = current_row`,
      making both names point at the SAME list. On the next row, writing
      `current_row[col]` clobbered the value the diagonal lookup was about to
      read, so `diagonal` silently became `right` and squares were
      over-counted (e.g. [[1, 1], [1, 0]] reported 2 instead of 1). The row is
      now snapshotted with a copy before moving up.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        # Copy, not alias: the next iteration overwrites current_row in place.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `largest_square_area_in_matrix_bottom_up` is not defined
    # under that name in this (name-mangled) module — presumably the bottom-up
    # DP above; verify the binding before running this script directly.
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 640
| 0
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _UpperCAmelCase :
    # Builds tiny LED configs and inputs for the TF test suite.
    # NOTE(review): identifiers are machine-mangled; the duplicated `a`
    # parameters in `__init__` are a SyntaxError as written, and the repeated
    # `lowercase_` targets were presumably distinct `self.*` attributes.
    __lowerCamelCase: List[Any] = LEDConfig
    __lowerCamelCase: Union[str, Any] = {}
    __lowerCamelCase: Any = 'gelu'

    def __init__( self : List[Any] , a : int , a : Optional[int]=1_3 , a : List[str]=7 , a : int=True , a : Dict=False , a : Optional[Any]=9_9 , a : Tuple=3_2 , a : Dict=2 , a : Dict=4 , a : Dict=3_7 , a : List[str]=0.1 , a : Optional[int]=0.1 , a : Optional[Any]=2_0 , a : Tuple=2 , a : List[Any]=1 , a : str=0 , a : str=4 , ):
        '''Record the tiny-model hyper-parameters shared by every test case.'''
        lowercase_ : Union[str, Any] = parent
        lowercase_ : Optional[int] = batch_size
        lowercase_ : int = seq_length
        lowercase_ : str = is_training
        lowercase_ : Any = use_labels
        lowercase_ : Union[str, Any] = vocab_size
        lowercase_ : Dict = hidden_size
        lowercase_ : Tuple = num_hidden_layers
        lowercase_ : Optional[int] = num_attention_heads
        lowercase_ : Dict = intermediate_size
        lowercase_ : Optional[Any] = hidden_dropout_prob
        lowercase_ : Tuple = attention_probs_dropout_prob
        lowercase_ : List[Any] = max_position_embeddings
        lowercase_ : int = eos_token_id
        lowercase_ : Dict = pad_token_id
        lowercase_ : Optional[Any] = bos_token_id
        lowercase_ : int = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        lowercase_ : str = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        lowercase_ : str = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def lowerCAmelCase__ ( self : Dict ):
        '''Create a tiny LEDConfig plus encoder/decoder inputs (EOS-terminated).'''
        lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        # Force an EOS token at the end of every encoder sequence.
        lowercase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        lowercase_ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ : Union[str, Any] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        lowercase_ : int = prepare_led_inputs_dict(a , a , a )
        # Global attention on the last encoder position only.
        lowercase_ : Tuple = tf.concat(
            [tf.zeros_like(a )[:, :-1], tf.ones_like(a )[:, -1:]] , axis=-1 , )
        lowercase_ : List[Any] = global_attention_mask
        return config, inputs_dict

    def lowerCAmelCase__ ( self : str , a : List[str] , a : Tuple ):
        '''Check that cached past_key_values reproduce the no-cache decoder outputs.'''
        lowercase_ : str = TFLEDModel(config=a ).get_decoder()
        lowercase_ : Optional[int] = inputs_dict['input_ids']
        lowercase_ : Dict = input_ids[:1, :]
        lowercase_ : Tuple = inputs_dict['attention_mask'][:1, :]
        lowercase_ : int = 1
        # first forward pass
        lowercase_ : str = model(a , attention_mask=a , use_cache=a )
        lowercase_ : Optional[int] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase_ : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        lowercase_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        lowercase_ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        lowercase_ : Dict = model(a , attention_mask=a )[0]
        lowercase_ : str = model(a , attention_mask=a , past_key_values=a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        lowercase_ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        lowercase_ : Dict = output_from_no_past[:, -3:, random_slice_idx]
        lowercase_ : str = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(a , a , rtol=1e-3 )
def __SCREAMING_SNAKE_CASE ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    """Build the standard kwargs dict for TFLED tests, deriving any mask not given.

    Fixes applied: the original signature repeated the placeholder name
    `_UpperCamelCase` for every parameter (duplicate arguments — a SyntaxError)
    and the first branch referenced an undefined `__lowercase`; parameter names
    are restored from the returned dict's keys and the call sites
    (`prepare_led_inputs_dict(config, input_ids, decoder_input_ids)`).
    """
    if attention_mask is None:
        # Attend to every non-pad encoder token.
        lowercase_ : Dict = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # Always attend to the first decoder position; mask pads elsewhere.
        lowercase_ : int = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        # Keep all encoder heads active by default.
        lowercase_ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        lowercase_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    # NOTE(review): the mangled `lowercase_` targets above were presumably the
    # corresponding mask names returned below — verify against the original file.
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class _UpperCAmelCase ( __A , __A , unittest.TestCase ):
    # Common-test harness for TF LED models (attention-output checks etc.).
    # NOTE(review): class attributes and method names are machine-mangled; the
    # repeated `__lowerCamelCase` fields were presumably all_model_classes,
    # all_generative_model_classes, pipeline_model_mapping and the usual
    # is_encoder_decoder/test_* flags — verify against the original file.
    __lowerCamelCase: Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    __lowerCamelCase: Any = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    __lowerCamelCase: Optional[Any] = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __lowerCamelCase: str = True
    __lowerCamelCase: List[str] = False
    __lowerCamelCase: Tuple = False
    __lowerCamelCase: str = False

    def lowerCAmelCase__ ( self : str ):
        '''Set up the model tester and the config tester.'''
        lowercase_ : List[Any] = TFLEDModelTester(self )
        lowercase_ : str = ConfigTester(self , config_class=a )

    def lowerCAmelCase__ ( self : Dict ):
        '''Run the shared LEDConfig sanity checks.'''
        self.config_tester.run_common_tests()

    def lowerCAmelCase__ ( self : str ):
        '''Decoder with past_key_values must match the no-cache decoder output.'''
        lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*a )

    def lowerCAmelCase__ ( self : Dict ):
        '''Check shapes/counts of (global) attention outputs under each config toggle.'''
        lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_ : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
        lowercase_ : Tuple = 2
        # Mark the first `num_global_attn_indices` positions as global attention.
        lowercase_ : Any = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        lowercase_ : List[str] = True
        lowercase_ : Dict = self.model_tester.seq_length
        lowercase_ : str = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(a : Dict ):
            # One attention tensor per layer, shaped [heads, tgt_len, src_len].
            lowercase_ : Dict = outputs.decoder_attentions
            self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(a : int ):
            # Local and global attentions are reported separately by LED.
            lowercase_ : Optional[int] = [t.numpy() for t in outputs.encoder_attentions]
            lowercase_ : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            lowercase_ : List[str] = True
            lowercase_ : Union[str, Any] = False
            lowercase_ : List[Any] = False
            lowercase_ : Any = model_class(a )
            lowercase_ : Any = model(self._prepare_for_class(a , a ) )
            lowercase_ : Any = len(a )
            self.assertEqual(config.output_hidden_states , a )
            check_encoder_attentions_output(a )
            if self.is_encoder_decoder:
                lowercase_ : Dict = model_class(a )
                lowercase_ : int = model(self._prepare_for_class(a , a ) )
                self.assertEqual(config.output_hidden_states , a )
                check_decoder_attentions_output(a )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowercase_ : Tuple = True
            lowercase_ : Tuple = model_class(a )
            lowercase_ : Tuple = model(self._prepare_for_class(a , a ) )
            self.assertEqual(config.output_hidden_states , a )
            check_encoder_attentions_output(a )
            # Check attention is always last and order is fine
            lowercase_ : List[str] = True
            lowercase_ : Optional[int] = True
            lowercase_ : str = model_class(a )
            lowercase_ : int = model(self._prepare_for_class(a , a ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a ) )
            self.assertEqual(model.config.output_hidden_states , a )
            check_encoder_attentions_output(a )

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def lowerCAmelCase__ ( self : Any ):
        '''Skipped: tracing-incompatible (see skip reason above).'''
        pass

    def lowerCAmelCase__ ( self : Tuple ):
        '''Intentionally a no-op override of a common test.'''
        pass
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Wrap a nested list of token ids in a constant TF integer tensor.

    Fix: the body referenced an undefined name `__lowercase`; the only value in
    scope is the single parameter, which is now passed through.
    """
    # NOTE(review): `tf.intaa` looks machine-mangled (likely tf.int32) — confirm
    # against the original test file before relying on the dtype.
    return tf.constant(_UpperCamelCase , dtype=tf.intaa )
UpperCamelCase__ = 1e-4
@slow
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    # Slow integration tests against the released allenai/led-base-16384 weights.
    def lowerCAmelCase__ ( self : int ):
        '''Forward a long input through the base LED encoder-decoder and compare
        the first 3x3 hidden-state slice against recorded reference values.'''
        lowercase_ : str = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        lowercase_ : Any = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
        lowercase_ : int = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
        lowercase_ : List[str] = prepare_led_inputs_dict(model.config , a , a )
        lowercase_ : str = model(**a )[0]
        lowercase_ : Union[str, Any] = (1, 1_0_2_4, 7_6_8)
        self.assertEqual(output.shape , a )
        # change to expected output here
        lowercase_ : int = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , a , atol=1e-3 )

    def lowerCAmelCase__ ( self : Any ):
        '''Same forward pass but through the LM head; checks the logits slice.'''
        lowercase_ : Optional[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        lowercase_ : Union[str, Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
        lowercase_ : List[str] = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
        lowercase_ : List[Any] = prepare_led_inputs_dict(model.config , a , a )
        lowercase_ : List[str] = model(**a )[0]
        lowercase_ : Dict = (1, 1_0_2_4, model.config.vocab_size)
        self.assertEqual(output.shape , a )
        # change to expected output here
        lowercase_ : Any = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , a , atol=1e-3 , rtol=1e-3 )
| 704
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# Lightweight stand-in for the argparse namespace consumed by TestCommand;
# the trailing eight fields default to the values listed in `defaults`
# (namedtuple applies defaults to the rightmost fields).
UpperCamelCase__ = namedtuple(
    '_TestCommandArgs',
    [
        'dataset',
        'name',
        'cache_dir',
        'data_dir',
        'all_configs',
        'save_infos',
        'ignore_verifications',
        'force_redownload',
        'clear_cache',
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( source , target ):
    """Return True when `source` is within 1% of `target` (relative to `target`).

    Fix: the original signature repeated the placeholder `_UpperCamelCase` for
    both parameters (duplicate arguments — a SyntaxError) while the body read
    `source` and `target`; the real names are restored from those reads.

    Note: raises ZeroDivisionError when target == 0 (unchanged behavior — the
    callers compare recorded byte counts, which are non-zero).
    """
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    '''Run the `datasets-cli test --save_infos` command on a fixture dataset and
    check the regenerated dataset_infos against expected values (byte sizes
    compared with 1% relative tolerance).

    NOTE(review): every distinct value here was mangled to `_UpperCamelCase`
    (the fixture dir, tmp paths, getattr keys, ...) and `lowercase_` stands in
    for several different locals — verify names against the original test.
    '''
    lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
    lowercase_ : int = TestCommand(*_UpperCamelCase )
    test_command.run()
    # --save_infos writes the metadata into the dataset's README.md.
    lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
    assert os.path.exists(_UpperCamelCase )
    lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
    lowercase_ : Optional[int] = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string" ) ),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
                        "langs": Sequence(Value("string" ) ),
                        "spans": Sequence(Value("string" ) ),
                    } ) , splits=[
                    {
                        "name": "train",
                        "num_bytes": 235_1563,
                        "num_examples": 1_0000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 23_8418,
                        "num_examples": 1000,
                    },
                ] , download_size=394_0680 , dataset_size=258_9981 , )
        } )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
        if key == "num_bytes":
            # Byte counts can drift slightly between runs; allow 1% tolerance.
            assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
        elif key == "splits":
            assert list(_UpperCamelCase ) == list(_UpperCamelCase )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            # NOTE(review): comparison result is discarded — almost certainly
            # meant to be `assert result == expected`.
            result == expected
| 640
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( state_dict , encoder_only=False ):
    """Map original SegFormer checkpoint keys onto HF `segformer.*` names.

    Fixes applied: the original signature repeated the placeholder parameter
    name (duplicate arguments — a SyntaxError), assigned every intermediate
    rename to a throwaway `lowercase_` so the substitutions never chained, and
    referenced undefined names (`__snake_case` for the parsed index,
    `new_state_dict` in the return). Each visible `replace()` pattern is now
    applied to `key` in order and the renamed entry is stored in the result.

    Args:
        state_dict: mapping of original parameter names to tensors.
        encoder_only: when True, prefix non-head keys with "segformer.encoder."
            (classification checkpoints nest the backbone under the encoder).
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head" ):
            key = "segformer.encoder." + key
        if key.startswith("backbone" ):
            key = key.replace("backbone" , "segformer.encoder" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(idx)-1}""" )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
            key = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(idx)-1}""" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f"""block{idx}""" , f"""block.{int(idx)-1}""" )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(idx)-1}""" )
        if key.startswith("head" ):
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
    return new_state_dict
def __SCREAMING_SNAKE_CASE ( state_dict , config ):
    """Split each fused key/value ("kv") projection into separate key/value entries.

    The original SegFormer implementation stores keys and values as one matrix;
    HF expects them as separate `attention.self.key.*` / `attention.self.value.*`
    parameters. The first `hidden_sizes[i]` rows of the fused weight (and the
    first `hidden_sizes[i]` bias entries) are the key projection; the rest are
    the value projection.

    Fixes applied: the original signature repeated the placeholder parameter
    name (duplicate arguments — a SyntaxError), and all four slices were
    assigned to a throwaway `lowercase_` instead of being written back into
    `state_dict`; the target key names are restored from the popped
    "...attention.self.kv.*" pattern. Mutates `state_dict` in place.
    """
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[
                config.hidden_sizes[i] :
            ]
def __SCREAMING_SNAKE_CASE ( ):
    """Download the standard COCO test image used to smoke-test the converted model.

    Fixes undefined `__snake_case` references: the URL local is what gets
    passed to `requests.get`, and the download is streamed (stream=True) so
    PIL can read directly from the raw response file object.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Convert an original SegFormer/MiT checkpoint to HF format and save it.

    NOTE(review): this function is heavily name-mangled — every `__snake_case`
    is an undefined placeholder (model_name / checkpoint_path / state_dict /
    config / ...), all three parameters share one name (a SyntaxError as
    written), and the `lowercase_` targets were presumably distinct locals.
    The structure is: build a SegformerConfig from the model name, load and
    rename the original state dict, run one forward pass, compare a 3x3 logits
    slice against recorded values, then save model + image processor.
    """
    lowercase_ : int = SegformerConfig()
    lowercase_ : Any = False
    # set attributes based on model_name
    lowercase_ : int = "huggingface/label-files"
    if "segformer" in model_name:
        # Segmentation checkpoint: size tag follows the "segformer." prefix.
        lowercase_ : Optional[int] = model_name[len("segformer." ) : len("segformer." ) + 2]
        if "ade" in model_name:
            lowercase_ : Dict = 150
            lowercase_ : Dict = "ade20k-id2label.json"
            lowercase_ : Dict = (1, 150, 128, 128)
        elif "city" in model_name:
            lowercase_ : int = 19
            lowercase_ : List[str] = "cityscapes-id2label.json"
            lowercase_ : Optional[int] = (1, 19, 128, 128)
        else:
            raise ValueError(f"""Model {model_name} not supported""" )
    elif "mit" in model_name:
        # Encoder-only (MiT) checkpoint: ImageNet classification head.
        lowercase_ : Any = True
        lowercase_ : List[Any] = model_name[4:6]
        lowercase_ : Tuple = 1000
        lowercase_ : int = "imagenet-1k-id2label.json"
        lowercase_ : List[str] = (1, 1000)
    else:
        raise ValueError(f"""Model {model_name} not supported""" )
    # set config attributes
    lowercase_ : int = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
    lowercase_ : Optional[int] = {int(__snake_case ): v for k, v in idalabel.items()}
    lowercase_ : Dict = idalabel
    lowercase_ : List[Any] = {v: k for k, v in idalabel.items()}
    # Architecture hyper-parameters per size variant (b0 keeps the defaults).
    if size == "b0":
        pass
    elif size == "b1":
        lowercase_ : Tuple = [64, 128, 320, 512]
        lowercase_ : int = 256
    elif size == "b2":
        lowercase_ : Union[str, Any] = [64, 128, 320, 512]
        lowercase_ : Dict = 768
        lowercase_ : List[Any] = [3, 4, 6, 3]
    elif size == "b3":
        lowercase_ : Dict = [64, 128, 320, 512]
        lowercase_ : int = 768
        lowercase_ : Optional[int] = [3, 4, 18, 3]
    elif size == "b4":
        lowercase_ : Optional[int] = [64, 128, 320, 512]
        lowercase_ : List[Any] = 768
        lowercase_ : Optional[int] = [3, 8, 27, 3]
    elif size == "b5":
        lowercase_ : Any = [64, 128, 320, 512]
        lowercase_ : Tuple = 768
        lowercase_ : Optional[int] = [3, 6, 40, 3]
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # load image processor (only resize + normalize)
    lowercase_ : Optional[int] = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
    # prepare image
    lowercase_ : List[Any] = prepare_img()
    lowercase_ : Tuple = image_processor(images=__snake_case , return_tensors="pt" ).pixel_values
    logger.info(f"""Converting model {model_name}...""" )
    # load original state dict
    if encoder_only:
        lowercase_ : Dict = torch.load(__snake_case , map_location=torch.device("cpu" ) )
    else:
        lowercase_ : Optional[Any] = torch.load(__snake_case , map_location=torch.device("cpu" ) )["state_dict"]
    # rename keys
    lowercase_ : str = rename_keys(__snake_case , encoder_only=__snake_case )
    if not encoder_only:
        # Segmentation checkpoints carry a conv_seg head HF does not use.
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(__snake_case , __snake_case )
    # create HuggingFace model and load state dict
    if encoder_only:
        lowercase_ : List[str] = False
        lowercase_ : Dict = SegformerForImageClassification(__snake_case )
    else:
        lowercase_ : Any = SegformerForSemanticSegmentation(__snake_case )
    model.load_state_dict(__snake_case )
    model.eval()
    # forward pass
    lowercase_ : str = model(__snake_case )
    lowercase_ : Dict = outputs.logits
    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        lowercase_ : Union[str, Any] = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] )
    elif model_name == "segformer.b1.512x512.ade.160k":
        lowercase_ : Union[str, Any] = torch.tensor(
            [
                [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
                [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
                [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
            ] )
    elif model_name == "segformer.b2.512x512.ade.160k":
        lowercase_ : Union[str, Any] = torch.tensor(
            [
                [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
                [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
                [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
            ] )
    elif model_name == "segformer.b3.512x512.ade.160k":
        lowercase_ : str = torch.tensor(
            [
                [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
                [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
                [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
            ] )
    elif model_name == "segformer.b4.512x512.ade.160k":
        lowercase_ : str = torch.tensor(
            [
                [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
                [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
                [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
            ] )
    elif model_name == "segformer.b5.640x640.ade.160k":
        lowercase_ : Tuple = torch.tensor(
            [
                [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
                [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
                [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
            ] )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        lowercase_ : Optional[int] = torch.tensor(
            [
                [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
                [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
                [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
            ] )
    elif model_name == "segformer.b0.512x1024.city.160k":
        lowercase_ : Union[str, Any] = torch.tensor(
            [
                [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
                [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
                [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
            ] )
    elif model_name == "segformer.b0.640x1280.city.160k":
        lowercase_ : Any = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ] )
    elif model_name == "segformer.b0.768x768.city.160k":
        lowercase_ : List[str] = torch.tensor(
            [
                [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
                [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
                [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
            ] )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        lowercase_ : List[str] = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        lowercase_ : Union[str, Any] = torch.tensor(
            [
                [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
                [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
                [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
            ] )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        lowercase_ : Union[str, Any] = torch.tensor(
            [
                [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
                [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
                [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
            ] )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        lowercase_ : Optional[Any] = torch.tensor(
            [
                [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
                [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
                [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
            ] )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        lowercase_ : List[str] = torch.tensor(
            [
                [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
                [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
                [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
            ] )
    else:
        # Unknown name: just report the predicted ImageNet class instead.
        lowercase_ : Dict = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-2 )
    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(__snake_case ).mkdir(exist_ok=__snake_case )
    model.save_pretrained(__snake_case )
    image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
    # CLI entry point: convert a named SegFormer/MiT checkpoint to HF format.
    # NOTE(review): the two `UpperCamelCase__` targets were presumably `parser`
    # and `args` (the names read below) — mangled by obfuscation.
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='segformer.b0.512x512.ade.160k',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    UpperCamelCase__ = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 705
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Build one concrete dummy input per requested agent input type.

    Args:
        _UpperCamelCase: iterable of input-type names (``"text"``, ``"image"``,
            ``"audio"``) or nested lists of them, which are handled
            recursively.

    Returns:
        list with one input per entry: a fixed string for text, a 512x512 PIL
        image loaded from the test fixtures for image, and a 1-D torch tensor
        of ones for audio (nested lists yield nested lists).

    Raises:
        ValueError: if an entry is not one of the supported type names.
    """
    # Fix: the original body appended to an undefined name `inputs`, iterated
    # an undefined `input_types`, tested `isinstance(x, x)` and recursed via
    # an undefined `create_inputs`.
    lowercase_ : List[Any] = []
    for input_type in _UpperCamelCase:
        if input_type == "text":
            lowercase_.append("Text input" )
        elif input_type == "image":
            lowercase_.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            lowercase_.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            # Nested spec: build the grouped inputs recursively.
            lowercase_.append(__SCREAMING_SNAKE_CASE(input_type ) )
        else:
            raise ValueError(F"""Invalid type requested: {input_type}""" )
    return lowercase_
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Map each agent output to the name of its modality.

    Args:
        _UpperCamelCase: iterable of outputs produced by a tool.

    Returns:
        list of type names (``"text"``, ``"image"`` or ``"audio"``), one per
        output, matched via ``isinstance`` against the agent types.

    Raises:
        ValueError: for an output of unsupported type.
    """
    # Fix: the original body iterated an undefined name `outputs` and
    # appended to an undefined `output_types` instead of using the parameter
    # and the local accumulator.
    lowercase_ : Optional[int] = []
    for output in _UpperCamelCase:
        if isinstance(output , (str, AgentText) ):
            lowercase_.append("text" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            lowercase_.append("image" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            lowercase_.append("audio" )
        else:
            raise ValueError(F"""Invalid output: {output}""" )
    return lowercase_
@is_tool_test
class _UpperCAmelCase :
    # Mixin of contract tests for agent tools; the concrete test case is
    # expected to provide ``self.tool`` and the unittest assertion methods.
    # NOTE(review): as written this class cannot run — all five methods share
    # the name ``lowerCAmelCase__`` (only the last survives) and the free
    # names ``a``, ``authorized_types``, ``inputs``, ``outputs``,
    # ``create_inputs`` and ``output_types`` are undefined in this module,
    # apparently casualties of an automated rename. Confirm against the
    # original ToolTesterMixin.
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Every declared input/output type must be one of the authorized modalities.'''
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )
        lowercase_ : Optional[Any] = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , a ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        lowercase_ : Any = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def lowerCAmelCase__ ( self : Any ):
        '''Calling the tool on generated inputs yields outputs of the declared types.'''
        lowercase_ : List[str] = create_inputs(self.tool.inputs )
        lowercase_ : List[str] = self.tool(*a )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            lowercase_ : Union[str, Any] = [outputs]
        self.assertListEqual(output_types(a ) , self.tool.outputs )
    def lowerCAmelCase__ ( self : List[str] ):
        '''The tool exposes a description and a default checkpoint.'''
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
    def lowerCAmelCase__ ( self : Any ):
        '''Each output is an instance of the agent type mapped to its declared modality.'''
        lowercase_ : Any = create_inputs(self.tool.inputs )
        lowercase_ : str = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
        for output, output_type in zip(a , self.tool.outputs ):
            lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(a , a ) )
    def lowerCAmelCase__ ( self : Tuple ):
        '''The tool also accepts inputs wrapped in their agent types.'''
        lowercase_ : Dict = create_inputs(self.tool.inputs )
        lowercase_ : Optional[int] = []
        for _input, input_type in zip(a , self.tool.inputs ):
            if isinstance(a , a ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        lowercase_ : Any = self.tool(*a )
        if not isinstance(a , a ):
            lowercase_ : Any = [outputs]
        self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
UpperCamelCase__ = '''docs/source/en/_toctree.yml'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
    """Clean one table-of-content section: deduplicate entries sharing a
    ``local`` key, sort the rest alphabetically by title, and keep the
    (at most one) "Overview" entry first.

    Args:
        _UpperCamelCase: list of toc entries, each a dict with at least a
            ``title`` key and usually a ``local`` key.

    Returns:
        the cleaned, sorted list of entries, overview first.

    Raises:
        ValueError: if one duplicated ``local`` maps to several different
            titles, or if more than one "Overview" entry is present.
    """
    # Fix: the original body read undefined names (`doc_list`, `counts`,
    # `lowerCAmelCase__`, a lambda parameter mismatch) because assignment
    # targets had been mangled; locals are restored consistently.
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in _UpperCamelCase:
        if "local" in doc:
            counts[doc["local"]] += 1
        # Pull "Overview" aside so it can be re-inserted at the front.
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"""{duplicate_key} is present several times in the documentation table of content at """
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
    new_doc = sorted(new_doc , key=lambda s: s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        # NOTE(review): message lacks an f-prefix, so "{doc_list}" is printed
        # literally — kept as-is to match the original behavior.
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=False ):
    """Check (and optionally fix) the "Schedulers" section of the doc toc.

    Args:
        _UpperCamelCase: overwrite flag — when True, rewrite the toc file
            (path constant ``UpperCamelCase__`` defined above) in place
            instead of raising on an unsorted section.

    Raises:
        ValueError: if the section is not properly sorted and overwriting is
            disabled.
    """
    # Fix: the original body opened/dumped via the undefined name
    # `lowerCAmelCase__` and wrote the cleaned section into throwaway locals,
    # so `overwrite=True` never persisted anything.
    # NOTE(review): `clean_doc_toc` is not defined under that name in this
    # module as written — confirm against the original check_doc_toc utility.
    with open(UpperCamelCase__ , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if _UpperCamelCase:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if _UpperCamelCase:
            content[api_idx]["sections"] = api_doc
            with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=False ):
    """Check (and optionally fix) the "Pipelines" section of the doc toc,
    cleaning each sub-pipeline section and the overall list.

    Args:
        _UpperCamelCase: overwrite flag — when True, rewrite the toc file
            (path constant ``UpperCamelCase__`` defined above) in place
            instead of raising on an unsorted section.

    Raises:
        ValueError: if the section is not properly sorted and overwriting is
            disabled.
    """
    # Fix: the original body opened/dumped via the undefined name
    # `lowerCAmelCase__`, appended an undefined value, and wrote the cleaned
    # sections into throwaway locals, so `overwrite=True` never persisted.
    # NOTE(review): `clean_doc_toc` is not defined under that name in this
    # module as written — confirm against the original check_doc_toc utility.
    with open(UpperCamelCase__ , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if _UpperCamelCase:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if _UpperCamelCase:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if _UpperCamelCase:
            content[api_idx]["sections"] = api_doc
            with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    # CLI entry point for the toc consistency checker.
    # NOTE(review): the parser is bound to ``UpperCamelCase__`` but read back
    # as ``parser`` / ``args``, and ``check_scheduler_doc`` /
    # ``check_pipeline_doc`` are not defined under those names in this module
    # as written — an automated rename appears to have broken this block;
    # confirm against the original check_doc_toc utility before running.
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    UpperCamelCase__ = parser.parse_args()
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 706
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( h , w , scale_factor=8 ):
    """Round latent spatial dims up and return pixel dims divisible by
    ``scale_factor``.

    Works in latent units of ``scale_factor**2`` pixels: each of ``h`` and
    ``w`` is divided by ``scale_factor**2`` rounding up, then multiplied back
    by ``scale_factor``.

    Args:
        h: requested image height in pixels.
        w: requested image width in pixels.
        scale_factor: spatial scale factor of the latent decoder.

    Returns:
        (new_h, new_w) tuple of adjusted dimensions.
    """
    # Fix: the original definition declared three parameters all named
    # `_UpperCamelCase` (a SyntaxError) while the body read `h`, `w` and
    # `scale_factor`; the parameter names the body expects are restored.
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class _UpperCAmelCase ( snake_case ):
    # Kandinsky text-to-image decoder pipeline: MultilingualCLIP text encoder
    # + XLM-Roberta tokenizer + UNet diffusion decoder + MoVQ latent decoder.
    # NOTE(review): as written this class cannot run — every method declares
    # several parameters all named ``a`` (a SyntaxError in Python) and binds
    # results to ``lowercase_`` while reading them under their original names
    # (`latents`, `prompt_embeds`, ...). This block appears to have been
    # damaged by an automated rename; confirm against the original
    # KandinskyPipeline before relying on any of it.
    def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
        '''Register the sub-models and cache the MoVQ spatial scale factor.'''
        super().__init__()
        self.register_modules(
            text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
        lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
        '''Draw (or validate) initial latents and scale by the scheduler's init noise sigma.'''
        if latents is None:
            lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            lowercase_ : Optional[int] = latents.to(a )
        lowercase_ : str = latents * scheduler.init_noise_sigma
        return latents
    def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
        '''Tokenize and encode the prompt (and the negative prompt when classifier-free guidance is on).'''
        lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
        # get prompt text embeddings
        lowercase_ : Any = self.tokenizer(
            a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
        lowercase_ : Union[str, Any] = text_inputs.input_ids
        lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
            lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
        lowercase_ : List[str] = text_input_ids.to(a )
        lowercase_ : int = text_inputs.attention_mask.to(a )
        lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
            input_ids=a , attention_mask=a )
        lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
        lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
        lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : List[str]
            if negative_prompt is None:
                lowercase_ : int = [""] * batch_size
            elif type(a ) is not type(a ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
                    f""" {type(a )}.""" )
            elif isinstance(a , a ):
                lowercase_ : Tuple = [negative_prompt]
            elif batch_size != len(a ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`." )
            else:
                lowercase_ : Dict = negative_prompt
            lowercase_ : str = self.tokenizer(
                a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
            lowercase_ : List[Any] = uncond_input.input_ids.to(a )
            lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
            lowercase_ , lowercase_ : int = self.text_encoder(
                input_ids=a , attention_mask=a )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase_ : List[str] = negative_prompt_embeds.shape[1]
            lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
            lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
            lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
            lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
            lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , a , -1 )
            lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
            lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
        '''Offload the unet, text encoder and MoVQ to CPU via accelerate's cpu_offload.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        lowercase_ : str = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(a , a )
    def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
        '''Hook-based model offload (accelerate >= 0.17): weights move to GPU only during forward.'''
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase_ : List[str] = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
        if self.safety_checker is not None:
            lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
        # We'll offload the last model manually.
        lowercase_ : Dict = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__ ( self : Tuple ):
        '''Device the pipeline actually executes on (accounts for accelerate hooks).'''
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(a )
    def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
        '''Full generation loop: encode prompt, denoise latents with the UNet, decode with MoVQ.'''
        if isinstance(a , a ):
            lowercase_ : List[str] = 1
        elif isinstance(a , a ):
            lowercase_ : int = len(a )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
        lowercase_ : Tuple = self._execution_device
        lowercase_ : Dict = batch_size * num_images_per_prompt
        lowercase_ : Dict = guidance_scale > 1.0
        lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
            a , a , a , a , a )
        if isinstance(a , a ):
            lowercase_ : Optional[int] = torch.cat(a , dim=0 )
        if isinstance(a , a ):
            lowercase_ : int = torch.cat(a , dim=0 )
        if do_classifier_free_guidance:
            lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=a )
        self.scheduler.set_timesteps(a , device=a )
        lowercase_ : List[str] = self.scheduler.timesteps
        lowercase_ : str = self.unet.config.in_channels
        lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
        # create initial latent
        lowercase_ : str = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
        for i, t in enumerate(self.progress_bar(a ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            lowercase_ : Optional[int] = self.unet(
                sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
            if do_classifier_free_guidance:
                lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
                lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
                lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ : Tuple = self.scheduler.step(
                a , a , a , generator=a , ).prev_sample
        # post-processing
        lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            lowercase_ : List[Any] = image * 0.5 + 0.5
            lowercase_ : Optional[int] = image.clamp(0 , 1 )
            lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase_ : List[str] = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCamelCase__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
    # BuilderConfig for the Spark-backed dataset builder defined below.
    # Optional explicit `datasets.Features` schema; defaults to None.
    # NOTE(review): the builder below references this class as `SparkConfig`,
    # a name that does not exist in this module as written.
    __lowerCamelCase: Optional[datasets.Features] = None
def __SCREAMING_SNAKE_CASE ( df , partition_order , ):
    """Build a generator factory yielding ``(key, row_dict)`` examples from a
    Spark DataFrame, one partition at a time in the given order.

    Args:
        df: ``pyspark.sql.DataFrame`` to read rows from.
        partition_order: iterable of partition ids determining iteration
            order.

    Returns:
        a zero-argument function that yields ``("{partition_id}_{row_id}",
        row.asDict())`` pairs.
    """
    # Fix: the original signature declared two parameters both named
    # `_UpperCamelCase` (a SyntaxError) while the body read `df` and
    # `partition_order`; locals mangled to `lowercase_` are also restored.
    import pyspark

    def generate_fn():
        # Tag each row with its partition id so single partitions can be
        # selected and collected independently, preserving the given order.
        df_with_partition_id = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*" ).where(F"""part_id = {partition_id}""" ).drop("part_id" )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield F"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1

    return generate_fn
class _UpperCAmelCase ( _BaseExamplesIterable ):
    # Examples iterable over a Spark DataFrame, yielding rows partition by
    # partition in `partition_order`.
    # NOTE(review): as written this class cannot work — `__init__` and the
    # sharding method declare two parameters both named ``a`` (a SyntaxError),
    # attribute assignments were flattened to ``lowercase_ = ...`` bindings,
    # the three non-dunder methods all share the name ``lowerCAmelCase__``
    # (only the last survives), and ``a_`` / ``generator`` /
    # ``_generate_iterable_examples`` are undefined here. Confirm against the
    # original `SparkExamplesIterable` in `datasets`.
    def __init__( self : Tuple , a : Optional[int] , a : Any=None , ) -> Union[str, Any]:
        '''Bind the DataFrame, resolve the partition order, build the example generator.'''
        lowercase_ : Dict = df
        lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        lowercase_ : int = _generate_iterable_examples(self.df , self.partition_order )
    def __iter__( self : int ) -> Tuple:
        '''Yield (key, example) pairs produced by the generator factory.'''
        yield from self.generate_examples_fn()
    def lowerCAmelCase__ ( self : Optional[Any] , a : int ) -> Union[str, Any]:
        '''Return a copy of this iterable visiting partitions in a shuffled order.'''
        lowercase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )
    def lowerCAmelCase__ ( self : List[Any] , a : List[str] , a : Tuple ) -> int:
        '''Return a copy restricted to the partitions assigned to one worker.'''
        lowercase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )
    @property
    def lowerCAmelCase__ ( self : List[str] ) -> List[str]:
        '''One shard per partition in the iteration order.'''
        return len(self.partition_order )
class _UpperCAmelCase ( datasets.DatasetBuilder ):
    # DatasetBuilder that materializes a Spark DataFrame into Arrow/Parquet
    # shards written in parallel by Spark tasks.
    # NOTE(review): as written this class cannot run — several methods share
    # the name ``lowerCAmelCase__`` (only the last survives), ``__init__``
    # declares multiple parameters all named ``a`` (a SyntaxError), attribute
    # assignments were flattened to ``lowercase_ = ...`` bindings, and the
    # names ``SparkConfig``, ``a_``, ``shutil``, ``SparkExamplesIterable``
    # are undefined in this module. Confirm against the original Spark
    # builder in `datasets/packaged_modules/spark/spark.py`.
    __lowerCamelCase: Any = SparkConfig
    def __init__( self : List[str] , a : List[Any] , a : Dict = None , a : Dict = None , **a : Union[str, Any] , ) -> Dict:
        '''Bind the Spark session/DataFrame and init the base builder keyed on the DataFrame's semantic hash.'''
        import pyspark
        lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
        lowercase_ : Optional[Any] = df
        lowercase_ : List[Any] = working_dir
        super().__init__(
            cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
    def lowerCAmelCase__ ( self : Optional[int] ) -> str:
        '''On multi-node clusters, verify the cache dir is visible to both driver and workers.'''
        def create_cache_and_write_probe(a : Any ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=a_ )
            lowercase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(a_ , "a" )
            return [probe_file]
        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            lowercase_ : List[str] = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
    def lowerCAmelCase__ ( self : int ) -> Optional[int]:
        '''Dataset metadata built from the configured features.'''
        return datasets.DatasetInfo(features=self.config.features )
    def lowerCAmelCase__ ( self : Union[str, Any] , a : Any ) -> List[Any]:
        '''A single TRAIN split.'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def lowerCAmelCase__ ( self : str , a : List[Any] ) -> Optional[int]:
        '''Repartition the DataFrame so each partition's estimated Arrow size stays under max_shard_size.'''
        import pyspark
        def get_arrow_batch_size(a : Union[str, Any] ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
        lowercase_ : str = self.df.count()
        lowercase_ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        lowercase_ : Any = (
            self.df.limit(a_ )
            .repartition(1 )
            .mapInArrow(a_ , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        lowercase_ : int = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            lowercase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
            lowercase_ : int = self.df.repartition(a_ )
    def lowerCAmelCase__ ( self : Tuple , a : Optional[Any] , a : str , a : List[str] , ) -> Optional[int]:
        '''Have each Spark task write its partition to shard files, yielding per-task (examples, bytes, shards) stats.'''
        import pyspark
        lowercase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
        lowercase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
        lowercase_ : Optional[Any] = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        lowercase_ : int = self.config.features
        lowercase_ : Any = self._writer_batch_size
        lowercase_ : Tuple = self._fs.storage_options
        def write_arrow(a : str ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            lowercase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
            lowercase_ : Optional[int] = next(a_ , a_ )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            lowercase_ : List[Any] = 0
            lowercase_ : Optional[int] = writer_class(
                features=a_ , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
            lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
            writer.write_table(a_ )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    lowercase_ : List[str] = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    lowercase_ : List[str] = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
                lowercase_ : Optional[int] = pa.Table.from_batches([batch] )
                writer.write_table(a_ )
            if writer._num_bytes > 0:
                lowercase_ : Dict = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
        if working_fpath != fpath:
            for file in os.listdir(os.path.dirname(a_ ) ):
                lowercase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
                shutil.move(a_ , a_ )
        lowercase_ : int = (
            self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def lowerCAmelCase__ ( self : List[str] , a : int , a : Dict = "arrow" , a : List[Any] = None , a : Dict = None , **a : Optional[Any] , ) -> List[Any]:
        '''Drive the parallel shard writing, accumulate split stats, and rename shards to the SSSSS-of-NNNNN scheme.'''
        self._validate_cache_dir()
        lowercase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(a_ )
        lowercase_ : Dict = not is_remote_filesystem(self._fs )
        lowercase_ : List[str] = os.path.join if is_local else posixpath.join
        lowercase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
        lowercase_ : List[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
        lowercase_ : int = path_join(self._output_dir , a_ )
        lowercase_ : int = 0
        lowercase_ : Optional[Any] = 0
        lowercase_ : int = 0
        lowercase_ : Dict = []
        lowercase_ : Any = []
        for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
            (
                lowercase_
            ) : Tuple = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(a_ )
        lowercase_ : Dict = total_num_examples
        lowercase_ : Any = total_num_bytes
        # should rename everything at the end
        logger.debug(f"""Renaming {total_shards} shards.""" )
        if total_shards > 1:
            lowercase_ : List[Any] = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            lowercase_ : Any = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                a : int , a : Any , a : Union[str, Any] , ):
                rename(
                    a_ , fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , f"""{global_shard_id:05d}""" ).replace("NNNNN" , f"""{total_shards:05d}""" ) , )
            lowercase_ : Optional[int] = []
            lowercase_ : Dict = 0
            for i in range(len(a_ ) ):
                lowercase_ : Tuple = task_id_and_num_shards[i]
                for shard_id in range(a_ ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a : _rename_shard(*a_ ) ).collect()
        else:
            # don't use any pattern
            lowercase_ : int = 0
            lowercase_ : Optional[int] = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
    def lowerCAmelCase__ ( self : int , a : str , ) -> Any:
        '''Streaming view: an examples iterable over the DataFrame.'''
        return SparkExamplesIterable(self.df )
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( height , width , scale_factor=8 ):
    """Round latent spatial dims up and return pixel dims divisible by
    ``scale_factor``.

    Works in latent units of ``scale_factor**2`` pixels: each of ``height``
    and ``width`` is divided by ``scale_factor**2`` rounding up, then
    multiplied back by ``scale_factor``.

    Args:
        height: requested image height in pixels.
        width: requested image width in pixels.
        scale_factor: spatial scale factor of the latent decoder.

    Returns:
        (new_height, new_width) tuple of adjusted dimensions.
    """
    # Fix: the original definition declared three parameters all named
    # `_UpperCamelCase` (a SyntaxError) while the body read `height`, `width`
    # and `scale_factor`; the parameter names the body expects are restored.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a normalized NCHW float tensor.

    Args:
        pil_image (PIL.Image.Image): input image.
        w (int): target width (default 512).
        h (int): target height (default 512).

    Returns:
        torch.Tensor: shape (1, 3, h, w), float32 values in [-1, 1].
    """
    # Fix: parameters were obfuscated to `_UpperCamelCase` while the body used
    # `pil_image`/`w`/`h`/`arr` (NameError). Renamed to match the call site
    # `prepare_image(...)` in the pipeline's __call__.
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    # Scale uint8 [0, 255] -> float [-1, 1].
    arr = arr.astype(np.float32) / 127.5 - 1
    # HWC -> CHW for torch.
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class _UpperCAmelCase ( snake_case ):
    # Kandinsky 2.2 image-to-image decoder pipeline (upstream: KandinskyV22Img2ImgPipeline).
    # Noises movq latents of an input image to a strength-dependent timestep, denoises
    # them with a UNet conditioned on CLIP image embeddings, then decodes with the movq VQ-VAE.
    # NOTE(review): parameters are uniformly obfuscated to `a` while method bodies
    # reference the intended names (num_inference_steps, image, generator, gpu_id, ...),
    # so most methods raise NameError as written; parameter names need restoring.
    # NOTE(review): the base `snake_case` is undefined here — presumably DiffusionPipeline.

    def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
        '''Register the UNet, DDPM scheduler and movq modules on the pipeline.'''
        # NOTE(review): all three parameters share the name `a`; only the last
        # binding (the VQModel) is visible, so every module registers the same object.
        super().__init__()
        self.register_modules(
            unet=a , scheduler=a , movq=a , )
        # Pixel-to-latent downscale factor: 2^(number of movq resolution levels - 1).
        lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
        '''Trim the scheduler timesteps for img2img: skip the first (1 - strength)
        fraction of the schedule; return (timesteps, effective step count).'''
        # NOTE(review): `num_inference_steps`/`strength` are unbound (params are `a`).
        lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
        lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
        lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
        '''Encode `image` into movq latents (or pass 4-channel latents through),
        duplicate to the effective batch size, and noise them to the start timestep.'''
        if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
        lowercase_ : str = image.to(device=a , dtype=a )
        # Effective batch = prompt batch * images per prompt.
        lowercase_ : Any = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # Already latents — use as-is.
            lowercase_ : str = image
        else:
            if isinstance(a , a ) and len(a ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(a , a ):
                # Per-sample generators: encode each image slice with its own generator.
                lowercase_ : str = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
                ]
                lowercase_ : List[Any] = torch.cat(a , dim=0 )
            else:
                lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
            # Scale encoder output into the UNet's latent space.
            lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
        lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
        lowercase_ : List[Any] = init_latents.shape
        lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
        # get latents
        lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
        lowercase_ : Tuple = init_latents
        return latents

    def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
        '''Sequentially offload the UNet and movq to CPU via accelerate to save GPU memory.'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        # NOTE(review): `gpu_id` is unbound (the parameter is named `a`).
        lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
        lowercase_ : Optional[Any] = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(a , a )

    def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
        '''Model-level CPU offload with hooks (requires accelerate >= 0.17.0.dev0);
        modules move to GPU only while executing.'''
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase_ : Optional[int] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            # Chain hooks so each module offloads when the next one runs.
            lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
        # We'll offload the last model manually.
        lowercase_ : List[Any] = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__ ( self : int ):
        '''Device the pipeline actually executes on, honouring accelerate hooks.'''
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(a , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    # NOTE(review): `a` is unbound at class-body evaluation time; upstream this
    # decorator receives the module-level EXAMPLE_DOC_STRING constant.
    @replace_example_docstring(a )
    def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
        '''Run img2img generation: prepare CFG image embeddings, encode the input
        image to latents, denoise over the trimmed schedule, and decode with movq.
        Returns an ImagePipelineOutput (or a 1-tuple when return_dict is False).'''
        lowercase_ : Optional[int] = self._execution_device
        # Classifier-free guidance is active for guidance_scale > 1.
        lowercase_ : Dict = guidance_scale > 1.0
        if isinstance(a , a ):
            lowercase_ : Dict = torch.cat(a , dim=0 )
        lowercase_ : Dict = image_embeds.shape[0]
        if isinstance(a , a ):
            lowercase_ : str = torch.cat(a , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image and stack [negative, positive].
            lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
            lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
        if not isinstance(a , a ):
            lowercase_ : List[Any] = [image]
        if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support  PIL image and pytorch tensor""" )
        # Resize/normalize all inputs and encode them to movq latents.
        lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
        lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
        lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
        lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
        self.scheduler.set_timesteps(a , device=a )
        lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
        # All samples start from the same (strength-dependent) timestep.
        lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
        lowercase_ : Tuple = self.prepare_latents(
            a , a , a , a , image_embeds.dtype , a , a )
        for i, t in enumerate(self.progress_bar(a ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase_ : int = {"image_embeds": image_embeds}
            lowercase_ : Optional[int] = self.unet(
                sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
            if do_classifier_free_guidance:
                # UNet predicts noise and (learned) variance channels; guide only the noise.
                lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
                lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
                lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not expect variance channels — drop them.
                lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase_ : Dict = self.scheduler.step(
                a , a , a , generator=a , )[0]
        # post-processing
        lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] -> [0, 1] and move to NHWC numpy.
            lowercase_ : Tuple = image * 0.5 + 0.5
            lowercase_ : Any = image.clamp(0 , 1 )
            lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            if output_type == "pil":
                lowercase_ : Tuple = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
| 640
| 0
|
'''simple docstring'''
import math
def sieve(n):
    """Return the list of all primes <= n using a segmented Sieve of Eratosthenes.

    A base sieve up to sqrt(n) collects the small primes, which are then used to
    mark composites in windows of width ~sqrt(n), keeping memory at O(sqrt(n)).

    Args:
        n (int): upper bound (inclusive).

    Returns:
        list[int]: primes in ascending order.
    """
    # Fix: the obfuscated version bound every local to `lowercase_` while the
    # body read `start`, `end`, `temp`, `in_prime`, `low`, `high`, `t` (NameError),
    # and the def name did not match the `sieve(...)` call below.
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # Base sieve over [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Sieve each window [low, high] using the base primes.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` inside the window.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
| 708
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase(ProcessorMixin):
    """Processor wrapping a Pix2Struct image processor and a T5 tokenizer.

    Routes text-only, image-only and VQA inputs: plain text goes straight to the
    tokenizer, images go to the image processor (with the question rendered into
    the image header in the VQA case), and non-VQA text targets are renamed to
    ``decoder_input_ids`` / ``decoder_attention_mask``.
    """

    # NOTE(review): class kept under its original (obfuscated) name so external
    # references keep working; upstream this is `Pix2StructProcessor`.
    # Fix: the three ProcessorMixin hooks were all obfuscated to one name
    # (`__lowerCamelCase`), the base class `snake_case` was undefined, and all
    # parameters were named `a` while bodies used the real names.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Pix2Struct's tokenizer must not emit token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare model inputs from images and/or text.

        Raises:
            ValueError: if neither `images` nor `text` is provided.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text: behave exactly like the tokenizer (non-VQA).
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox; VQA renders the question into the image header
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # The text is the decoder target for Pix2Struct — rename to decoder_* keys.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 640
| 0
|
'''simple docstring'''
# Lazy-import module initializer for the Reformer model family.
# Fix: the obfuscated version repeatedly rebound a single `UpperCamelCase__`
# name instead of building `_import_structure`, which the final _LazyModule
# call references — restored to the canonical transformers lazy-init pattern.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Map of submodule name -> public names it provides; extended below based on
# which optional dependencies are installed.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
    # Unit tests for KandinskyVaaPriorPipeline (text -> CLIP image-embedding prior).
    # NOTE(review): the mixin base `snake_case` is undefined in this file —
    # presumably PipelineTesterMixin.
    # NOTE(review): all five class attributes below share the obfuscated name
    # `__lowerCamelCase`, so only the final binding (False) survives; upstream
    # they are pipeline_class / params / batch_params / required_optional_params
    # / test_xformers_attention. Likewise every method is named `lowerCAmelCase__`
    # (later defs shadow earlier ones, and none start with `test_`, so unittest
    # will not discover them), and bodies reference names that were obfuscated
    # away (self.time_input_dim, self.dummy_prior, tokenizer, model, pipe, ...).
    __lowerCamelCase: Dict = KandinskyVaaPriorPipeline
    __lowerCamelCase: Optional[int] = ['prompt']
    __lowerCamelCase: Any = ['prompt', 'negative_prompt']
    __lowerCamelCase: List[Any] = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    __lowerCamelCase: List[Any] = False

    @property
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Hidden size of the dummy text embedder.'''
        return 3_2

    @property
    def lowerCAmelCase__ ( self : Any ):
        '''Time-embedding input dimension of the dummy prior.'''
        return 3_2

    @property
    def lowerCAmelCase__ ( self : Any ):
        '''Block output channels; mirrors the time input dimension.'''
        return self.time_input_dim

    @property
    def lowerCAmelCase__ ( self : str ):
        '''Cross-attention dimension: four times the time input dimension.'''
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Dummy tokenizer maximum sequence length.'''
        return 1_0_0

    @property
    def lowerCAmelCase__ ( self : List[Any] ):
        '''Tiny CLIP tokenizer fixture from the hf-internal-testing hub repo.'''
        lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Deterministic tiny CLIP text encoder fixture (seeded).'''
        torch.manual_seed(0 )
        lowercase_ : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModelWithProjection(a )

    @property
    def lowerCAmelCase__ ( self : Tuple ):
        '''Deterministic tiny PriorTransformer fixture (seeded).'''
        torch.manual_seed(0 )
        lowercase_ : List[str] = {
            "num_attention_heads": 2,
            "attention_head_dim": 1_2,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        lowercase_ : Union[str, Any] = PriorTransformer(**a )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def lowerCAmelCase__ ( self : Tuple ):
        '''Deterministic tiny CLIP vision encoder fixture (seeded).'''
        torch.manual_seed(0 )
        lowercase_ : Dict = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
        lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
        return model

    @property
    def lowerCAmelCase__ ( self : List[str] ):
        '''CLIP image processor fixture with standard CLIP normalization stats.'''
        lowercase_ : List[str] = CLIPImageProcessor(
            crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
        return image_processor

    def lowerCAmelCase__ ( self : List[str] ):
        '''Assemble the component dict needed to build the pipeline under test.'''
        lowercase_ : Any = self.dummy_prior
        lowercase_ : Optional[Any] = self.dummy_image_encoder
        lowercase_ : List[Any] = self.dummy_text_encoder
        lowercase_ : Any = self.dummy_tokenizer
        lowercase_ : Optional[Any] = self.dummy_image_processor
        lowercase_ : List[str] = UnCLIPScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
        lowercase_ : List[Any] = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
        '''Deterministic call kwargs for the pipeline (device-appropriate seeded generator).'''
        if str(a ).startswith("mps" ):
            # MPS generators must be created via torch.manual_seed.
            lowercase_ : int = torch.manual_seed(a )
        else:
            lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
        lowercase_ : Any = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''CPU smoke test: check output embedding shape and a 10-value reference slice,
        for both dict and tuple return paths.'''
        lowercase_ : str = "cpu"
        lowercase_ : Any = self.get_dummy_components()
        lowercase_ : int = self.pipeline_class(**a )
        lowercase_ : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
        lowercase_ : List[Any] = output.image_embeds
        lowercase_ : str = pipe(
            **self.get_dummy_inputs(a ) , return_dict=a , )[0]
        lowercase_ : Any = image[0, -1_0:]
        lowercase_ : Dict = image_from_tuple[0, -1_0:]
        assert image.shape == (1, 3_2)
        lowercase_ : int = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def lowerCAmelCase__ ( self : Optional[Any] ):
        '''Batch-of-one vs. single-call consistency check (skipped on MPS).'''
        lowercase_ : int = torch_device == "cpu"
        lowercase_ : Tuple = True
        lowercase_ : str = False
        self._test_inference_batch_single_identical(
            test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )

    @skip_mps
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''Attention-slicing forward-pass consistency check (skipped on MPS).'''
        lowercase_ : Any = torch_device == "cpu"
        lowercase_ : int = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 0
|
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1-bits) of a non-negative integer.

    Uses Brian Kernighan's trick: ``n &= n - 1`` clears the lowest set bit, so
    the loop runs once per set bit rather than once per bit position.

    Args:
        number: non-negative integer to examine.

    Returns:
        Number of 1-bits in ``number``.

    Raises:
        ValueError: if ``number`` is not an int or is negative.
    """
    # Fix: the obfuscated guard was `isinstance(A_, A_)` with `A_` undefined;
    # restored to a real type/range check. The function also gets a public,
    # descriptive name so it is importable and testable.
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 710
|
'''simple docstring'''
from __future__ import annotations
# Shared fixture for the next-greater-element implementations below.
# Fix: both constants were bound to the same obfuscated name `UpperCamelCase__`,
# while the __main__ block and doctests reference `arr`/`expect`.
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
# Expected next-greater value for each position of `arr` (-1 = no greater element).
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr):
    """For each element, find the next element to its right that is greater.

    Brute force: O(n^2) double scan; -1 marks positions with no greater element.

    Args:
        arr (list[float]): input values.

    Returns:
        list[float]: next-greater value (or -1) for every index of `arr`.
    """
    # Fix: locals were obfuscated to `lowercase_` while the body read
    # `arr_size`/`next_`/`result`; also the def name now matches its call sites.
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_ = arr[j]
                break
        result.append(next_)
    return result
def next_greatest_element_fast(arr):
    """For each element, find the next greater element to its right.

    Same O(n^2) worst case as the slow version, but uses enumerate/slicing and
    short-circuits on the first match.

    Args:
        arr (list[float]): input values.

    Returns:
        list[float]: next-greater value (or -1) for every index of `arr`.
    """
    # Fix: restores the `result`/`next_` locals that the obfuscated version
    # referenced but never bound, and the public name used by the __main__ block.
    result = []
    for i, outer in enumerate(arr):
        next_: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_ = inner
                break
        result.append(next_)
    return result
def next_greatest_element(arr):
    """For each element, find the next greater element to its right in O(n).

    Scans right-to-left with a monotonic stack: elements <= the current value
    can never be the answer for anything further left, so they are popped.

    Args:
        arr (list[float]): input values.

    Returns:
        list[float]: next-greater value (or -1) for every index of `arr`.
    """
    # Fix: the obfuscated version never bound `stack`/`result` (NameError) and
    # wrote `False` into an unbound slot instead of result[index].
    arr_size = len(arr)
    stack: list = []
    result: list = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            # Drop stack entries that cannot be a "next greater" for arr[index].
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    # Script mode: run doctests, print each implementation's output on the
    # shared fixture, then benchmark all three with timeit.
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    # Fix: `setup` was bound to the obfuscated name `UpperCamelCase__`, so the
    # timeit calls below raised NameError.
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 640
| 0
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Locate the repository root (three levels up from this test file) and make its
# `utils/` directory importable so `check_copies` resolves.
# Fix: the path was bound to the obfuscated name `UpperCamelCase__` while the
# sys.path line referenced `git_repo_path` (NameError at import time).
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
# Fix: the constant was bound to the obfuscated name `UpperCamelCase__` while the
# test methods reference `REFERENCE_CODE`.
REFERENCE_CODE = """    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias
    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""
class _UpperCAmelCase ( unittest.TestCase ):
    # Tests for utils/check_copies.py: "# Copied from ..." consistency checking
    # and localized-README model-list conversion.
    # NOTE(review): every method is named `lowerCAmelCase__` (none start with
    # `test_`), so unittest will not discover them and later defs shadow earlier
    # ones. Bodies also reference names the obfuscation removed: `_A`,
    # `self.transformer_dir`, `comment`, `class_name`, `class_code`,
    # `overwrite_result`, `REFERENCE_CODE`, `localized_readme` — these must be
    # restored before the tests can run.

    def lowerCAmelCase__ ( self : List[Any] ):
        '''setUp: create a temporary transformers tree and copy the real
        modeling_bert.py into it so copy-consistency can be checked in isolation.'''
        lowercase_ : Any = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
        lowercase_ : List[str] = self.transformer_dir
        shutil.copy(
            os.path.join(_A , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )

    def lowerCAmelCase__ ( self : int ):
        '''tearDown: point back at the real source tree and delete the temp dir.'''
        lowercase_ : List[str] = 'src/transformers'
        shutil.rmtree(self.transformer_dir )

    def lowerCAmelCase__ ( self : Optional[int] , a : int , a : int , a : Optional[Any] , a : Dict=None ):
        '''Helper: write `class_code` under a "# Copied from" comment, black-format
        it into a temp file, then assert check_copies finds it consistent (or,
        when `overwrite_result` is given, that it rewrites the file to match).'''
        lowercase_ : int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            lowercase_ : Dict = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        # NOTE(review): `black.TargetVersion.PYaa` looks like an obfuscated
        # PY3x member name — confirm against the black API.
        lowercase_ : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
        lowercase_ : List[str] = black.format_str(_A , mode=_A )
        lowercase_ : List[Any] = os.path.join(self.transformer_dir , "new_code.py" )
        with open(_A , "w" , newline="\n" ) as f:
            f.write(_A )
        if overwrite_result is None:
            # Consistent copy: no diffs reported.
            self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
        else:
            # Inconsistent copy: overwrite in place, then compare file contents.
            check_copies.is_copy_consistent(f.name , overwrite=_A )
            with open(_A , "r" ) as f:
                self.assertTrue(f.read() , _A )

    def lowerCAmelCase__ ( self : Union[str, Any] ):
        '''find_code_in_transformers should return the BertLMPredictionHead source.'''
        lowercase_ : Optional[int] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
        self.assertEqual(_A , _A )

    def lowerCAmelCase__ ( self : List[Any] ):
        '''Exercise copy-consistency: exact copy, no trailing newline, simple
        rename, very long class name, and the overwrite path.'''
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , _A , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , _A ) , )
        # Copy consistency with a really long name
        lowercase_ : Any = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub("Bert" , _A , _A ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , _A , overwrite_result=re.sub("Bert" , "TestModel" , _A ) , )

    def lowerCAmelCase__ ( self : List[str] ):
        '''convert_to_localized_md (zh-hans): verify list conversion output,
        the completeness flag, and that stale doc links are re-synchronized.'''
        lowercase_ : Union[str, Any] = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        # English model-list fragment with three entries.
        lowercase_ : Union[str, Any] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
            ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
            ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
            ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
            ' Luong, Quoc V. Le, Christopher D. Manning.'
        )
        # Existing localized list: only the first (ALBERT) entry present.
        lowercase_ : Optional[Any] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        # Expected localized list after conversion: all three entries.
        lowercase_ : Optional[int] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
            ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
            ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
            ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
            ' Christopher D. Manning 发布。\n'
        )
        lowercase_ : Union[str, Any] = check_copies.convert_to_localized_md(
            _A , _A , localized_readme["format_model_list"] )
        self.assertFalse(_A )
        self.assertEqual(_A , _A )
        lowercase_ : Any = check_copies.convert_to_localized_md(
            _A , _A , localized_readme["format_model_list"] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(_A )
        lowercase_ : int = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
        )
        # Localized entry with a stale link (contains /main/) that should be fixed.
        lowercase_ : int = (
            '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
            ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        lowercase_ : Tuple = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        lowercase_ : Union[str, Any] = check_copies.convert_to_localized_md(
            _A , _A , localized_readme["format_model_list"] )
        # Check if the model link is synchronized.
        self.assertEqual(_A , _A )
| 711
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration class for GPT-NeoX-Japanese models.

    NOTE(review): the obfuscated original named every ``__init__`` parameter
    ``a`` (a SyntaxError) and used an undefined base class; parameter names and
    the ``PretrainedConfig`` base are restored from the attributes the body
    assigns and the imports at the top of the file.
    """

    # Identifier used by the auto-config machinery.
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        """Store the hyper-parameters; extra kwargs go to ``PretrainedConfig``."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 640
| 0
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __SCREAMING_SNAKE_CASE ( image_size , device ):
    """Download the BLIP demo image and preprocess it to a (1, 3, H, W) tensor.

    Parameter names are restored from the keyword call site below
    (``load_demo_image(image_size=..., device=...)``); the obfuscated original
    duplicated the parameter name and referenced an undefined ``__snake_case``.
    """
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP normalization constants (mean, std per RGB channel).
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


# Restore the name the rest of this script calls.
load_demo_image = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( key ):
    """Map an original BLIP state-dict key to its HF Transformers equivalent.

    The obfuscated original dropped each substitution into a throwaway local
    and referenced an undefined ``__snake_case``; each ``re.sub`` now rebinds
    ``key`` so the substitutions chain.
    """
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key


# Restore the name the conversion routine below calls.
rename_key = __SCREAMING_SNAKE_CASE
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( pytorch_dump_folder_path , config_path=None ):
    """Convert the original BLIP checkpoints (captioning, VQA, ITM) to HF format.

    Downloads each original checkpoint, renames its state-dict keys, loads it
    into the corresponding HF model, sanity-checks outputs against known
    values, and optionally saves to ``pytorch_dump_folder_path`` (+ suffix).

    NOTE(review): the obfuscated original duplicated parameter names and used
    an undefined ``__snake_case``; variable names are restored from context.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    # With the ITM head the model returns match logits; without it, a cosine score.
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


# Restore the name the __main__ guard calls.
convert_blip_checkpoint = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # CLI entry point: only the dump folder and optional config path are
    # accepted; the original erroneously read a never-registered
    # ``args.checkpoint_path`` and bound the parser to the wrong name.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 712
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
    """A graph vertex for Prim's minimum-spanning-tree algorithms.

    The obfuscated original discarded every ``__init__`` value into a local and
    gave both mutators the same name; attribute and method names are restored
    from the call sites below (``add_neighbor`` / ``add_edge`` / ``.key`` /
    ``.pi`` / ``.edges[v.id]``).
    """

    def __init__(self, id_):
        self.id = str(id_)   # vertex identifier, stored as a string
        self.key = None      # best known edge weight into the growing MST
        self.pi = None       # predecessor vertex in the MST
        self.neighbors = []  # adjacent vertex objects
        self.edges = {}      # {vertex id: distance}

    def __lt__(self, other):
        # Ordering by key lets min()/heapq select the closest fringe vertex.
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Record *vertex* as adjacent to this vertex."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Set the weight of the edge from this vertex to *vertex*."""
        self.edges[vertex.id] = weight


# Restore the intended public name.
Vertex = _UpperCAmelCase
def __SCREAMING_SNAKE_CASE ( graph , a , b , edge ):
    """Add an undirected edge of weight *edge* between vertices *a* and *b*.

    *a* and *b* are 1-based indices into *graph* (a list of vertex objects).
    The obfuscated original gave all four parameters the same name.
    """
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


# Restore the intended public name.
connect = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( graph , root ):
    """Prim's MST (list-based, O(V^2)) starting from *root*.

    Returns the MST edges as (child, parent) pairs, where each entry is the
    vertex's integer id plus one. The obfuscated original duplicated the
    parameter names and discarded every state update into a local.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # Extract the fringe vertex closest to the tree (Vertex.__lt__ on key).
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


# Restore the intended public name.
prim = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( graph , root ):
    """Prim's MST using a binary heap; yields (child, parent) edge pairs.

    Pair encoding matches the list-based variant: integer vertex id plus one.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # Re-establish the heap invariant after the key decrease.
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


# Restore the intended public name.
prim_heap = __SCREAMING_SNAKE_CASE
# NOTE(review): this helper's original doctest examples were lost during
# obfuscation; only the empty shell remains — restore from upstream if needed.
def __SCREAMING_SNAKE_CASE ( ):
    """Placeholder for the original doctest-based checks (content lost)."""
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 640
| 0
|
def __SCREAMING_SNAKE_CASE ( i , wt , val , j ):
    """Memoized 0/1 knapsack: best value using the first *i* items at capacity *j*.

    Reads and writes the module-level memo table ``f`` (entries < 0 are
    uncomputed). The obfuscated original duplicated all parameter names and
    never wrote the computed value back into ``f``.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


# Restore the intended public name (also used for the recursion above).
mf_knapsack = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( w , wt , val , n ):
    """Bottom-up 0/1 knapsack.

    Returns ``(best value at capacity w using all n items, full dp table)``.
    Returning ``dp[n][w]`` (instead of relying on the leaked loop variable
    ``w_``) also makes the degenerate ``w == 0`` case well defined.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                # Either take item i (value + best for remaining capacity) or skip it.
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp


# Restore the intended public name.
knapsack = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( w , wt , val ):
    """Solve the 0/1 knapsack and also reconstruct one optimal item subset.

    Returns ``(optimal value, set of chosen 1-indexed items)``.
    Raises ValueError for non-sequence or mismatched inputs, TypeError for
    non-integer weights.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


# Restore the intended public name.
knapsack_with_example_solution = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( dp , wt , i , j , optimal_set ):
    """Walk the dp table backwards, adding chosen 1-indexed items to *optimal_set*.

    If the optimal value at (i, j) differs from the value without item i, the
    item must be part of the solution; recurse with its weight deducted.
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # Item i is not part of the optimal subset.
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


# Restore the intended name (also the recursion target above).
_construct_solution = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Demo: wt/n/w chosen so the optimal subset is items 3 and 4 (value 8).
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    # Memo table for the recursive variant: row 0 is all zeros, the rest -1.
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 713
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    # inputing elements of the list in one line
    input_list = [int(x) for x in input().split()]
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 640
| 0
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = len(A__ ) + 1
lowercase_ : Tuple = len(A__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
lowercase_ : List[Any] = [[0 for i in range(A__ )] for j in range(A__ )]
# since string of zero length match pattern of zero length
lowercase_ : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , A__ ):
lowercase_ : Tuple = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , A__ ):
lowercase_ : str = dp[0][j - 2] if pattern[j - 1] == "*" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , A__ ):
for j in range(1 , A__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
lowercase_ : Any = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
lowercase_ : List[Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
lowercase_ : int = dp[i - 1][j]
else:
lowercase_ : Dict = 0
else:
lowercase_ : int = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"""{input_string} matches the given pattern {pattern}""")
    else:
        print(f"""{input_string} does not match with the given pattern {pattern}""")
| 714
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( graph ):
    """Return all bridges of an undirected graph given as ``{vertex: neighbors}``.

    Tarjan-style DFS: each vertex gets a discovery id and a low-link value;
    an edge (at, to) is a bridge iff no back edge from to's subtree reaches
    at or above (``id_ <= low[to]``). Vertices must be 0..n-1 integers.
    """
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


# Restore the intended public name.
compute_bridges = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Execute the embedded doctests on direct invocation.
    from doctest import testmod

    testmod()
| 640
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration class for Falcon models.

    NOTE(review): the obfuscated original named every ``__init__`` parameter
    ``a`` (a SyntaxError), used an undefined base, and gave both properties the
    same name; names are restored from the attributes the body assigns and the
    imports at the top of the file.
    """

    model_type = "falcon"
    # Cache tensors that generation utilities should ignore at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        """Store the hyper-parameters; extra kwargs go to ``PretrainedConfig``."""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Multi-query attention shares K/V heads; default to full head count.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Dimension of a single attention head."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Rotary position embeddings are used whenever ALiBi is disabled."""
        return not self.alibi
| 715
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
# NOTE(review): obfuscation collapsed all enum-style member names of this
# class to `__lowerCamelCase`, so each assignment overwrites the previous one
# and only the last value (5) survives; the base class name was also garbled.
# The original member names (one per scheduler implementation) are not
# recoverable from this file — confirm against the upstream source.
class _UpperCAmelCase ( snake_case ):
    __lowerCamelCase: int = 1
    __lowerCamelCase: List[Any] = 2
    __lowerCamelCase: Optional[Any] = 3
    __lowerCamelCase: int = 4
    __lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( BaseOutput ):
    """Output of a Flax scheduler step.

    NOTE(review): the obfuscated original used an undefined base name and a
    mangled field name; ``BaseOutput`` is restored from this file's imports and
    ``prev_sample`` from the scheduler-output convention — confirm upstream.
    """

    # Sample for the previous timestep (x_{t-1}); fed back as next model input.
    prev_sample: jnp.ndarray
class _UpperCAmelCase :
    """Base mixin for Flax schedulers: config-driven construction plus
    save/load helpers.

    NOTE(review): the obfuscated original reused a single name for all four
    methods (so only the last survived) and duplicated the classmethod
    parameter names (a SyntaxError); names are restored from the scheduler
    API implied by the body — confirm against upstream.
    """

    # File name used when (de)serializing the scheduler configuration.
    config_name = SCHEDULER_CONFIG_NAME
    # Init kwargs that must not be persisted into the config.
    ignore_for_config = ["dtype"]
    # Names of compatible scheduler classes (filled in by subclasses).
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and, if supported, its state) from a saved config."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        # Flax schedulers carry immutable state objects created on demand.
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write the scheduler configuration to *save_directory*."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this scheduler can be swapped with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package module.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def __SCREAMING_SNAKE_CASE ( x , shape ):
    """Broadcast *x* to *shape* by appending trailing singleton axes first,
    i.e. broadcasting from the left (leading axes of *x* are preserved)."""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


# Restore the name the helpers below call.
broadcast_to_shape_from_left = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
@flax.struct.dataclass
class _UpperCAmelCase :
    """Shared diffusion state: betas, alphas and their cumulative product.

    NOTE(review): field and method names restored from the keyword arguments of
    the original ``return cls(alphas=..., betas=..., alphas_cumprod=...)`` —
    confirm against upstream.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the state from *scheduler*'s config (beta schedule selection)."""
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"""
            )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def __SCREAMING_SNAKE_CASE ( state , original_samples , noise , timesteps ):
    """Gather sqrt(alpha_cumprod) and sqrt(1 - alpha_cumprod) at *timesteps*
    and broadcast each on the left to ``original_samples.shape``."""
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


# Restore the name the helpers below call.
get_sqrt_alpha_prod = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( state , original_samples , noise , timesteps ):
    """Forward-diffuse: x_t = sqrt(a_t) * x_0 + sqrt(1 - a_t) * eps."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


# Restore the intended public name.
add_noise_common = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( state , sample , noise , timesteps ):
    """v-prediction target: v = sqrt(a_t) * eps - sqrt(1 - a_t) * x."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity


# Restore the intended public name.
get_velocity_common = __SCREAMING_SNAKE_CASE
| 640
| 0
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
    """Holds one conversation: pending user input plus processed history.

    The obfuscated original discarded every ``__init__`` value into a local,
    called the nonexistent ``uuid.uuida()``, and gave several methods the same
    name; names are restored from the attributes the other methods read
    (``uuid``, ``past_user_inputs``, ``generated_responses``,
    ``new_user_input``).
    """

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()  # fixed: original called uuid.uuida()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text  # pending, not-yet-processed user input

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        # Same id means same conversation regardless of content drift.
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        """Queue *text* as the next user input; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\"."""
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"""
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs in chronological order."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"""{name} >> {text} \n"""
        return output


# Restore the intended public name (also used by __eq__ above).
Conversation = _UpperCAmelCase
@add_end_docstrings(
snake_case , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , *a : Optional[Any] , **a : str ):
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_ )
if self.tokenizer.pad_token_id is None:
lowercase_ : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase__ ( self : Union[str, Any] , a : Union[str, Any]=None , a : int=None , a : str=None , **a : str ):
'''simple docstring'''
lowercase_ : Union[str, Any] = {}
lowercase_ : Union[str, Any] = {}
lowercase_ : List[Any] = {}
if min_length_for_response is not None:
lowercase_ : List[str] = min_length_for_response
if minimum_tokens is not None:
lowercase_ : Optional[int] = minimum_tokens
if "max_length" in generate_kwargs:
lowercase_ : Union[str, Any] = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowercase_ : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , a : Union[Conversation, List[Conversation]] , a : str=0 , **a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self : List[Any] , a : Conversation , a : Any=3_2 ):
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
lowercase_ : Dict = self.tokenizer._build_conversation_input_ids(lowercase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowercase_ : Dict = self._legacy_parse_and_tokenize(lowercase_ )
if self.framework == "pt":
lowercase_ : Optional[int] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowercase_ : Any = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self : Union[str, Any] , a : Any , a : Optional[int]=1_0 , **a : Dict ):
'''simple docstring'''
lowercase_ : Any = generate_kwargs.get("max_length" , self.model.config.max_length )
lowercase_ : List[Any] = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
lowercase_ : Any = max_length - minimum_tokens
lowercase_ : Union[str, Any] = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
lowercase_ : Dict = model_inputs["attention_mask"][:, -trim:]
lowercase_ : str = model_inputs.pop("conversation" )
lowercase_ : Tuple = max_length
lowercase_ : List[str] = self.model.generate(**lowercase_ , **lowercase_ )
if self.model.config.is_encoder_decoder:
lowercase_ : Optional[int] = 1
else:
lowercase_ : Union[str, Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self : Optional[Any] , a : Union[str, Any] , a : int=True ):
'''simple docstring'''
lowercase_ : Optional[Any] = model_outputs["output_ids"]
lowercase_ : List[str] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
lowercase_ : List[Any] = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(lowercase_ )
return conversation
def lowerCAmelCase__ ( self , conversation : Conversation ):
    """Legacy fallback: flatten a conversation into input ids.

    Encodes each turn (without special tokens), appends EOS between turns
    when the tokenizer has one, and truncates from the left so the result
    fits ``model_max_length``. Fixes the mangled param/local names.
    """
    eos_token_id = self.tokenizer.eos_token_id
    input_ids = []
    for is_user, text in conversation.iter_texts():
        if eos_token_id is not None:
            input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
        else:
            input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
    if len(input_ids ) > self.tokenizer.model_max_length:
        # Keep only the most recent context that fits the model.
        input_ids = input_ids[-self.tokenizer.model_max_length :]
    return input_ids
| 716
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_UpperCamelCase , [-1 * len(_UpperCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Any = heapq.heappop(_UpperCamelCase )[1][0]
chosen_vertices.add(_UpperCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowercase_ : str = elem[1][1].index(_UpperCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_UpperCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 0
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
    """Composite RAG tokenizer wrapping a question-encoder tokenizer and a
    generator tokenizer.

    The mangled original had every method duplicated under one name (so only
    the last survived) and signatures with repeated ``a`` parameters
    (SyntaxError); the bodies clearly call distinct methods
    (``save_pretrained``, ``batch_decode`` …), restored here.
    """

    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        # Calls are routed through `current_tokenizer`; input mode by default.
        self.current_tokenizer = self.question_encoder

    def save_pretrained( self , save_directory ):
        """Save both sub-tokenizers under dedicated subfolders of `save_directory`."""
        if os.path.isfile(save_directory ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , "question_encoder_tokenizer" )
        generator_path = os.path.join(save_directory , "generator_tokenizer" )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Load both sub-tokenizers from a pretrained RAG checkpoint."""
        # Imported lazily to avoid a circular dependency with the auto mapping.
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config" , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder="generator_tokenizer" )
        return cls(question_encoder=question_encoder , generator=generator )

    def __call__( self , *args , **kwargs ):
        # Delegate to whichever tokenizer the current mode selects.
        return self.current_tokenizer(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )

    def _switch_to_input_mode( self ):
        # Route subsequent __call__s through the question encoder tokenizer.
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode( self ):
        # Route subsequent __call__s through the generator tokenizer.
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch( self , src_texts , tgt_texts=None , max_length=None , max_target_length=None , padding="longest" , return_tensors=None , truncation=True , **kwargs ):
        """Deprecated helper that tokenizes sources and (optionally) targets."""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details" , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
| 717
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
# Deprecation shim: keep `diffusers.pipeline_utils` importable while pointing
# users at the new location in `diffusers.pipelines.pipeline_utils`.
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate

# Registers the deprecation for this module path — presumably emits a warning
# on import (see diffusers.utils.deprecate); `standard_warn=False` because the
# message below is already fully phrased.
deprecate(
    'pipelines_utils',
    '0.22.0',
    'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
    standard_warn=False,
    stacklevel=3,
)
| 640
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch weights + config pair.

    Fixes the mangled version: three parameters shared one name (SyntaxError)
    and the body read undefined locals.

    Args:
        gpta_checkpoint_path: path to the TF checkpoint.
        gpta_config_file: optional JSON config path; "" means default config.
        pytorch_dump_folder_path: output directory for the PyTorch files.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    # The guard previously called an undefined name with nonexistent
    # `args.gpta_*` attributes; argparse exposes them as `gpt2_*`.
    __SCREAMING_SNAKE_CASE(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 718
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of labels for each GLUE fine-tuning task; read by the converter below
# (the mangled version bound this dict to an unrelated name).
GLUE_TASKS_NUM_LABELS = {
    'cola': 2,
    'mnli': 3,
    'mrpc': 2,
    'sst-2': 2,
    'sts-b': 1,
    'qqp': 2,
    'qnli': 2,
    'rte': 2,
    'wnli': 2,
}
logging.set_verbosity_info()


def __SCREAMING_SNAKE_CASE ( tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    """Convert a TensorFlow XLNet checkpoint into PyTorch weights + config.

    Fixes the mangled signature (repeated parameter names, SyntaxError) and
    the undefined locals the body read.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        bert_config_file: JSON config describing the XLNet architecture.
        pytorch_dump_folder_path: output directory.
        finetuning_task: optional GLUE/SQuAD task name selecting the head.
    """
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--xlnet_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained XLNet model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--finetuning_task',
        default=None,
        type=str,
        help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
    )
    args = parser.parse_args()
    print(args)
    __SCREAMING_SNAKE_CASE(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 640
| 0
|
'''simple docstring'''
import string
import numpy
def __SCREAMING_SNAKE_CASE ( a , b ):
    """Euclid's algorithm: greatest common divisor of non-negative `a` and `b`.

    Fixes the mangled version, whose two parameters shared one name and
    whose recursion targeted the undefined `greatest_common_divisor`.
    """
    return b if a == 0 else __SCREAMING_SNAKE_CASE(b % a , a )
class _UpperCAmelCase :
    """Hill cipher over the 36-symbol alphabet A-Z0-9 (arithmetic mod 36).

    The mangled original bound every class attribute to one name and every
    method to one name, while bodies clearly read `key_string`, `modulus`,
    `to_int` and called `self.encrypt` / `self.process_text` etc.; those
    working names are restored here.
    """

    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    key_string = string.ascii_uppercase + string.digits
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36 )
    # element-wise round to the nearest integer
    to_int = numpy.vectorize(lambda x: round(x ) )

    def __init__( self , encrypt_key ):
        """`encrypt_key` is an n x n numpy integer matrix; n is the block size."""
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters( self , letter ):
        """Map an alphabet symbol to its numeric index."""
        return self.key_string.index(letter )

    def replace_digits( self , num ):
        """Map a (possibly float) index back to its alphabet symbol."""
        return self.key_string[round(num )]

    def check_determinant( self ):
        """Raise ValueError unless det(key) is coprime with 36 (invertible mod 36)."""
        import math

        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if math.gcd(det , req_l ) != 1:
            message = (
                f"""determinant modular {req_l} of encryption key({det}) """
                f"""is not co prime w.r.t {req_l}.\nTry another key."""
            )
            raise ValueError(message )

    def process_text( self , text ):
        """Uppercase, drop out-of-alphabet chars, pad with the last char to a
        multiple of break_key.

        NOTE(review): raises IndexError when the filtered text is empty —
        pre-existing behavior, kept.
        """
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )

    def encrypt( self , text ):
        """Encrypt `text`, returning ciphertext over A-Z0-9."""
        text = self.process_text(text.upper() )
        encrypted = ""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key( self ):
        """Return the inverse of the key matrix modulo 36."""
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        # Modular inverse of the determinant, found by brute force over 0..35
        # (guaranteed to exist because check_determinant enforced coprimality).
        det_inv = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(inv_key ) )

    def decrypt( self , text ):
        """Decrypt ciphertext produced by `encrypt` (padding preserved)."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = ""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main():
    """Interactive driver: read a key matrix, then encrypt or decrypt text.

    Renamed from the mangled `__SCREAMING_SNAKE_CASE`, which shadowed the
    module's gcd helper and was never reachable from the guard below (which
    calls `main()`); also rebinds the undefined locals the body read.
    """
    n = int(input("Enter the order of the encryption key: " ) )
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers" )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    # The cipher class in this module is (mangled) `_UpperCAmelCase`.
    hc = _UpperCAmelCase(numpy.array(hill_matrix ) )
    print("Would you like to encrypt or decrypt some text? (1 or 2)" )
    option = input("\n1. Encrypt\n2. Decrypt\n" )
    if option == "1":
        text_e = input("What text would you like to encrypt?: " )
        print("Your encrypted text is:" )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input("What text would you like to decrypt?: " )
        print("Your decrypted text is:" )
        print(hc.decrypt(text_d ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 719
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( number ):
    """Return True when *number* is a power of two (0 and 1 included).

    A power of two has exactly one bit set, so clearing the lowest set bit
    via ``n & (n - 1)`` leaves zero.

    Raises:
        ValueError: if *number* is negative.
    """
    if number < 0:
        raise ValueError("number must not be negative" )
    # `not` collapses the cleared-bit value to the same boolean the
    # original `== 0` comparison produced.
    return not number & (number - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 640
| 0
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# All paths are set with the intent you should run this script from the root
# of the repo. The mangled version bound all five constants below to one
# name, while the functions further down read these identifiers.
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'DecisionTransformerConfig',
    'EncoderDecoderConfig',
    'MusicgenConfig',
    'RagConfig',
    'SpeechEncoderDecoderConfig',
    'TimmBackboneConfig',
    'VisionEncoderDecoderConfig',
    'VisionTextDualEncoderConfig',
    'LlamaConfig',
}
def __SCREAMING_SNAKE_CASE ( config_class ):
    """Return the checkpoint name mentioned in `config_class`'s docstring, or None.

    Fixes the mangled version, whose body referenced the undefined `__a`,
    `checkpoint`, `ckpt_link`, and `ckpt_link_from_name`.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/" ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Fail if any non-exempt config class lacks a valid docstring checkpoint.

    Renamed from the mangled `__SCREAMING_SNAKE_CASE`, which shadowed the
    checkpoint-extraction helper above and was unreachable from the guard
    below (which calls this name); also rebinds the undefined locals.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = __SCREAMING_SNAKE_CASE(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = "\n".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 720
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

# Language-code ids read by the test classes below as EN_CODE / PYTHON_CODE;
# the mangled version bound both to one unrelated name.
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
    """PLBartTokenizer unit tests (run on top of the shared tokenizer mixin).

    NOTE(review): this file looks machine-mangled — the base name
    `snake_case` and the bare identifier `a` used throughout are never
    defined in view; `a` sits where a fixture path or a local variable was
    presumably intended. Confirm against the upstream test before running.
    """

    # Tokenizer class under test; no fast (rust) tokenizer; not slow-only.
    __lowerCamelCase: Optional[int] = PLBartTokenizer
    __lowerCamelCase: Any = None
    __lowerCamelCase: Dict = False

    def lowerCAmelCase__ ( self : int ):
        """Build a tokenizer from the SentencePiece fixture and save it to tmpdir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        tokenizer.save_pretrained(self.tmpdirname )  # NOTE(review): `tokenizer` is undefined here

    def lowerCAmelCase__ ( self : Optional[Any] ):
        """Tokenize/encode/decode round-trip with the `base` language-code set."""
        lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        # Converting back replaces out-of-vocab pieces with <unk>
        lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        # The last four vocab entries should be the language codes + <mask>
        lowercase_ : Dict = tokenizer.vocab_size
        lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
        self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
        lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : str = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )

    def lowerCAmelCase__ ( self : int ):
        """Same round-trip as above but with the `multi` language-code set."""
        lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
        lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
        self.assertListEqual(
            a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        # With `multi` codes the last seven vocab entries are language codes
        lowercase_ : Dict = tokenizer.vocab_size
        lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
        self.assertListEqual(
            a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
        lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        lowercase_ : List[Any] = tokenizer(a ).input_ids
        self.assertEqual(
            tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    """Integration tests against the `uclanlp/plbart-python-en_XX` checkpoint.

    NOTE(review): mangled identifiers — `a` below stands in for several
    different values (EN_CODE, truncation flags, decoded strings), and the
    class attributes the bodies read (`checkpoint_name`, `src_text`,
    `tgt_text`, `expected_src_tokens`) are all bound to `__lowerCamelCase`.
    Verify against the upstream test before running.
    """

    # Checkpoint under test.
    __lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
    # Source (python) samples.
    __lowerCamelCase: Tuple = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    # Target (english) samples.
    __lowerCamelCase: List[str] = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    # Expected ids for the first source sample (ends with EOS=2 + PYTHON_CODE).
    __lowerCamelCase: List[str] = [
        134,
        5452,
        3_3460,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        988,
        20,
        3_3456,
        19,
        3_3456,
        771,
        39,
        4258,
        889,
        3318,
        3_3441,
        3_3463,
        3_3465,
        3_3463,
        3_3449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def lowerCAmelCase__ ( cls : str ):
        """Load the pretrained tokenizer once for the whole test class."""
        lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        # Presumably the pad token id — TODO confirm against upstream.
        lowercase_ : List[str] = 1
        return cls

    def lowerCAmelCase__ ( self : int ):
        """Language-code ids are stable across releases."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )

    def lowerCAmelCase__ ( self : int ):
        """Encoding the first source sample yields the expected ids."""
        lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , a )

    def lowerCAmelCase__ ( self : Tuple ):
        """Decoding with skip_special_tokens drops the language code and EOS."""
        self.assertIn(a , self.tokenizer.all_special_ids )
        lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
        lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
        self.assertEqual(a , a )
        self.assertNotIn(self.tokenizer.eos_token , a )

    def lowerCAmelCase__ ( self : Dict ):
        """Truncation keeps max_length tokens ending with EOS + language code."""
        lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
        self.assertIsInstance(src_text[0] , a )
        lowercase_ : Tuple = 1_0
        lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , a )
        self.assertEqual(len(a ) , a )

    def lowerCAmelCase__ ( self : Dict ):
        """<mask> and __java__ map to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )

    def lowerCAmelCase__ ( self : str ):
        """Save/reload round-trip preserves the fairseq token map."""
        lowercase_ : Optional[int] = tempfile.mkdtemp()
        lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(a )
        lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )

    @require_torch
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Batch encoding places special tokens the same way fairseq does."""
        lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
        lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , a )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )

    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        """Padded/truncated batch has the expected shape and suffix tokens."""
        lowercase_ : Union[str, Any] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(a , a )
        self.assertEqual((2, 2_6) , batch.input_ids.shape )
        self.assertEqual((2, 2_6) , batch.attention_mask.shape )
        lowercase_ : Dict = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , a )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )

    def lowerCAmelCase__ ( self : int ):
        """Source and target truncation lengths apply independently."""
        lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
        lowercase_ : List[str] = self.tokenizer(
            text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
        lowercase_ : Dict = targets["input_ids"]
        lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )

    @require_torch
    def lowerCAmelCase__ ( self : List[str] ):
        """Translation-input builder sets forced_bos_token_id for the target lang."""
        lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(a ) , {
                # A, test, EOS, en_XX
                "input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 5_0_0_0_1,
            } , )
| 640
| 0
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __SCREAMING_SNAKE_CASE ( func ):
    """Decorator: make `func` return its wall-clock duration in seconds.

    Fixes the mangled version: the wrapper had duplicate parameter names
    (SyntaxError), called the undefined `func`, and assigned
    `func.__name__` to a dead local instead of onto the wrapper.
    """

    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        func(*args , **kwargs )  # result intentionally discarded; only timing matters
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples ( features , num_examples=100 , seq_shapes=None ):
    """Build `num_examples` random examples matching a `datasets` feature schema.

    Renamed from the clashing `__SCREAMING_SNAKE_CASE`: the dataset builder
    below calls it as `generate_examples`. Also fixes the duplicate
    parameter names and undefined locals of the mangled version.

    Args:
        features: mapping of column name -> datasets feature type.
        num_examples: number of examples to generate.
        seq_shapes: optional map of column name -> shape for Sequence columns.

    Returns:
        list of (index, example_dict) pairs.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for k, v in features.items():
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                # Unwrap nested sequences down to the leaf feature type.
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def __SCREAMING_SNAKE_CASE ( dataset_path , features , num_examples=100 , seq_shapes=None ):
    """Write a random Arrow dataset matching `features` and load it back.

    Fixes the mangled version: duplicate parameter names (SyntaxError),
    `writer.finalize()`'s result bound to a dead local while
    `num_final_examples` was read, and undefined locals throughout.
    """
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
    num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
| 721
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the encodec model module. The mangled version
# never bound `_import_structure` (read by `_LazyModule` below), dropped the
# modeling list into an unrelated name instead of the dict, and discarded
# the `_LazyModule` instead of installing it in `sys.modules`.
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self : List[str] , a : int , a : int=1_3 , a : int=3 , a : str=2_2_4 , a : str=3_0 , a : Dict=4_0_0 , a : Any=True , a : Dict=None , a : Optional[int]=True , a : Optional[Any]=[0.5, 0.5, 0.5] , a : Optional[int]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
lowercase_ : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
lowercase_ : Optional[Any] = parent
lowercase_ : int = batch_size
lowercase_ : Union[str, Any] = num_channels
lowercase_ : List[str] = image_size
lowercase_ : Optional[int] = min_resolution
lowercase_ : str = max_resolution
lowercase_ : Union[str, Any] = do_resize
lowercase_ : Optional[int] = size
lowercase_ : Tuple = do_normalize
lowercase_ : Optional[int] = image_mean
lowercase_ : Union[str, Any] = image_std
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for the ViT-style image processor: properties and PIL/numpy/torch inputs."""

    # Processor class under test; None when vision dependencies are missing.
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): tester class name is undefined in this file — confirm it
        # should be the tester class defined above.
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        # Covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 700
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class SchedulerType ( snake_case ):
    """Names of the supported learning-rate schedules.

    Members map one-to-one onto the factory functions registered in
    TYPE_TO_SCHEDULER_FUNCTION below.
    NOTE(review): base class `snake_case` is undefined in this file; upstream
    this is a string-valued enum (ExplicitEnum) — confirm.
    """

    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule ( optimizer , last_epoch = -1 ):
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        last_epoch: index of the last epoch when resuming training (-1 = fresh).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with a constant multiplier of 1.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup ( optimizer , num_warmup_steps , last_epoch = -1 ):
    """Constant learning rate preceded by a linear warmup.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: number of steps over which the lr ramps 0 -> base lr.
        last_epoch: index of the last epoch when resuming training (-1 = fresh).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` implementing the schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear ramp; max(1.0, ...) guards against num_warmup_steps == 0.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule ( optimizer , step_rules , last_epoch = -1 ):
    """Piecewise-constant learning-rate multipliers driven by a rule string.

    `step_rules` looks like "10:0.5,20:0.1,0.01": until step 10 the multiplier
    is 0.5, until step 20 it is 0.1, and afterwards it stays at the trailing
    value (0.01).

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        step_rules: comma-separated "step:multiplier" pairs plus a final
            multiplier used after the last boundary.
        last_epoch: index of the last epoch when resuming training (-1 = fresh).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` implementing the schedule.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    # The trailing entry (no colon) applies after every boundary.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Closure factory so the rule data is captured by value, not rebound.
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """Linear warmup to the base lr, then linear decay to 0.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: steps of linear ramp-up from 0 to the base lr.
        num_training_steps: total number of training steps; lr hits 0 there.
        last_epoch: index of the last epoch when resuming training (-1 = fresh).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` implementing the schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # max(0.0, ...) keeps the multiplier non-negative past num_training_steps.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    """Linear warmup, then cosine decay from the base lr toward 0.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: steps of linear ramp-up from 0 to the base lr.
        num_training_steps: total number of training steps.
        num_cycles: number of cosine half-waves; the default 0.5 decays
            monotonically from the base lr to 0.
        last_epoch: index of the last epoch when resuming training (-1 = fresh).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` implementing the schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts.

    Each cycle decays the base lr to 0 along a cosine and then jumps back up
    to the base lr ("hard restart").

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: steps of linear ramp-up from 0 to the base lr.
        num_training_steps: total number of training steps.
        num_cycles: number of hard restarts over the decay phase.
        last_epoch: index of the last epoch when resuming training (-1 = fresh).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` implementing the schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` wraps progress back to the start of each cosine cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    """Linear warmup, then polynomial decay from the optimizer's base lr to `lr_end`.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`; its default lr is the
            starting point of the decay.
        num_warmup_steps: steps of linear ramp-up from 0 to the base lr.
        num_training_steps: total number of training steps.
        lr_end: final learning rate after decay (must be < the base lr).
        power: polynomial exponent (1.0 = linear decay).
        last_epoch: index of the last epoch when resuming training (-1 = fresh).

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` implementing the schedule.

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's base lr.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Registry mapping each SchedulerType member to its factory function;
# consumed by get_scheduler() below.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler (
    name ,
    optimizer ,
    step_rules = None ,
    num_warmup_steps = None ,
    num_training_steps = None ,
    num_cycles = 1 ,
    power = 1.0 ,
    last_epoch = -1 ,
):
    """Unified factory: build any schedule from TYPE_TO_SCHEDULER_FUNCTION by name.

    Args:
        name: a `SchedulerType` or its string value.
        optimizer: the wrapped `torch.optim.Optimizer`.
        step_rules: rule string, used only by PIECEWISE_CONSTANT.
        num_warmup_steps: warmup steps, required by all warmup schedules.
        num_training_steps: total steps, required by decaying schedules.
        num_cycles: forwarded to COSINE_WITH_RESTARTS.
        power: forwarded to POLYNOMIAL.
        last_epoch: index of the last epoch when resuming training (-1 = fresh).

    Returns:
        The configured learning-rate scheduler.

    Raises:
        ValueError: when a schedule is missing a required argument.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
| 640
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Tuple , a : List[str] , a : Tuple=7 , a : Tuple=3 , a : Optional[Any]=3_0 , a : Any=4_0_0 , a : Optional[int]=True , a : Tuple=None , a : Dict=0.9 , a : Dict=None , a : Union[str, Any]=True , a : List[str]=[0.5, 0.5, 0.5] , a : str=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
lowercase_ : List[str] = size if size is not None else {"shortest_edge": 3_0}
lowercase_ : str = crop_size if crop_size is not None else {"height": 3_0, "width": 3_0}
lowercase_ : List[str] = parent
lowercase_ : List[Any] = batch_size
lowercase_ : Tuple = num_channels
lowercase_ : Dict = min_resolution
lowercase_ : List[Any] = max_resolution
lowercase_ : Dict = do_resize_and_center_crop
lowercase_ : Optional[Any] = size
lowercase_ : Dict = crop_pct
lowercase_ : Tuple = crop_size
lowercase_ : Tuple = do_normalize
lowercase_ : Optional[int] = image_mean
lowercase_ : Tuple = image_std
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for PoolFormerImageProcessor: properties, dict kwargs, and PIL/numpy/torch inputs."""

    # Processor class under test; None when vision dependencies are missing.
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): tester class name is undefined in this file — confirm it
        # should be the tester class defined above.
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})
        # Integer kwargs override the dict values and are expanded by the processor.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # Covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 701
|
'''simple docstring'''
def solution ( pence = 200 ):
    """Count the ways to make `pence` using UK coin denominations (Project Euler 31).

    Classic unbounded-knapsack DP: for each coin, accumulate the number of
    combinations for every amount from that coin's value up to `pence`.

    Args:
        pence: target amount in pence.

    Returns:
        Number of distinct coin combinations summing to `pence`.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # Start at `coin` so each combination uses coins in non-decreasing order.
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
| 640
| 0
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
    """Unit tests for `GenerationConfig`: save/load round-trip, model-config
    import, in-place update, unknown kwargs, and constructor kwargs."""

    @parameterized.expand([(None,), ("foo.json",)] )
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
    """Staging-hub tests: push a `GenerationConfig` to the Hub (user and org
    namespaces) and verify it round-trips via `from_pretrained`."""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos the tests may have created.
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 702
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( AbstractDatasetReader ):
    """Dataset reader that loads JSON / JSON-Lines files through the `Json` builder."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # JSON field to extract (for files that nest records under a key).
        self.field = field
        # Normalize to the {split: paths} mapping the builder expects.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset: streaming view, or fully prepared on disk."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class _UpperCAmelCase :
    """Serializes a `Dataset` to JSON / JSON-Lines, optionally using a process pool."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Write the whole dataset; returns the number of bytes written."""
        # A `path_or_buf` kwarg would conflict with ours — drop it.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON-Lines by default when orient == "records".
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead." )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs )
        return written

    def _batch_json(self, args):
        """Encode one slice of rows as JSON bytes (picklable pool worker)."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ):
        """Stream every batch into `file_obj`, serially or via a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
| 640
| 0
|
'''simple docstring'''
# Digit-parity tables used by the reversible-number count (Project Euler 145):
# the sum n + reverse(n) must consist entirely of odd digits.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length, remainder, digits, length):
    """Count reversible numbers of a given digit `length`.

    Fills digit pairs from the outside in; `remainder` carries the pending
    column sums, and `remaining_length` tracks how many central positions are
    still unassigned.

    Args:
        remaining_length: digits still to place (shrinks by 2 per pair).
        remainder: accumulated carry from already-placed digit pairs.
        digits: working buffer of the candidate number's digits.
        length: total digit count of the candidate numbers.

    Returns:
        Count of reversible numbers consistent with the choices made so far.
    """
    if remaining_length == 0:
        # Leading zeros are not allowed on either the number or its reverse.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every column sum (with carry) is odd.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        # Middle digit of an odd-length number: its doubled value plus the
        # incoming carry must stay odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # Pick the partner digit's parity so the column sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result


def solution(max_power=9):
    """Count reversible numbers below 10**max_power (Project Euler 145)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 703
|
"""
Find the largest square made only of 1s inside a binary matrix, using four
approaches of decreasing memory cost (plain recursion, memoised recursion,
bottom-up DP table, and a two-row space-optimised DP).
"""


def largest_square_area_in_matrix_top_down_approach(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Largest all-ones square via plain top-down recursion.

    Despite the module name, the returned value is the *side length* of the
    largest square of 1s: the recurrence ``1 + min(right, diagonal, down)``
    counts cells along one side.  No memoisation is used, so the running
    time is exponential — see the ``_with_dp`` variant for the fast version.

    >>> largest_square_area_in_matrix_top_down_approach(2, 2, [[1, 1], [1, 1]])
    2
    >>> largest_square_area_in_matrix_top_down_approach(2, 2, [[1, 1], [1, 0]])
    1
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # Base case: outside the matrix a square has size 0.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # A square anchored here extends as far as its three
            # neighbouring sub-squares allow.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    # One-element list so the nested function can mutate the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Largest all-ones square via memoised top-down recursion.

    Returns the *side length* of the largest square of 1s.  Each cell's
    sub-result is cached in ``dp_array`` so every (row, col) pair is solved
    once, giving O(rows * cols) time and space.

    >>> largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    >>> largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, [[1, 1], [1, 0]])
    1
    """

    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]
    ) -> int:
        if row >= rows or col >= cols:
            return 0
        # -1 marks "not yet computed"; anything else is a cached answer.
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
        else:
            sub_problem_sol = 0
        # Cache zero cells too; skipping them made all-zero regions
        # recompute exponentially.
        dp_array[row][col] = sub_problem_sol
        return sub_problem_sol

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Largest all-ones square via a bottom-up DP table.

    Returns the *side length* of the largest square of 1s in O(rows * cols)
    time with an O(rows * cols) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 0]])
    1
    """
    # One extra zero row/column serves as the out-of-bounds base case.
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            down = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, down)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Largest all-ones square via bottom-up DP keeping only two rows.

    Returns the *side length* of the largest square of 1s in
    O(rows * cols) time and O(cols) space: ``current_row`` is the row being
    filled and ``next_row`` holds the previously computed row below it.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 0]])
    1
    """
    # Extra trailing zero acts as the out-of-bounds base case.
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            down = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, down)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy, don't alias: with ``next_row = current_row`` the writes in the
        # next iteration would clobber the values this row just produced, so
        # ``diagonal`` would read the current row instead of the row below.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
    import doctest

    # Run the module's doctests, then demo the bottom-up solver on a 2x2
    # all-ones matrix (expected output: 2, the side length of the square).
    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 640
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.