| code (string, length 87-55.2k) | code_codestyle (int64, 0-349) | style_context (string, length 135-49.1k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> list[list]:
lowercase : Dict = current_set.copy()
for row_index, row in enumerate(SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = row[0]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE__ ):
if magnitude == 0:
lowercase : Union[str, Any] = column
continue
lowercase : List[Any] = column / magnitude
# Subtract to cancel term
lowercase : Tuple = current_set[0]
lowercase : Tuple = [first_row]
lowercase : Union[str, Any] = current_set[1::]
for row in current_set:
lowercase : Any = []
# If the first term is 0, the row is already in the form we want, so we preserve it
if row[0] == 0:
final_set.append(SCREAMING_SNAKE_CASE__ )
continue
for column_index in range(len(SCREAMING_SNAKE_CASE__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(SCREAMING_SNAKE_CASE__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase : List[str] = final_set[0]
lowercase : Dict = []
lowercase : str = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase : Dict = simplify(SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , SCREAMING_SNAKE_CASE__ )
lowercase : Any = resultant
return final_set
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> list:
if len(SCREAMING_SNAKE_CASE__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase : str = len(SCREAMING_SNAKE_CASE__ ) + 1
if any(len(SCREAMING_SNAKE_CASE__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase : str = equations.copy()
if any(0 in row for row in data_set ):
lowercase : Dict = data_set.copy()
lowercase : int = []
for row_index, row in enumerate(SCREAMING_SNAKE_CASE__ ):
if 0 not in row:
lowercase : Tuple = data_set.pop(SCREAMING_SNAKE_CASE__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , SCREAMING_SNAKE_CASE__ )
lowercase : Any = data_set.copy()
lowercase : List[str] = simplify(SCREAMING_SNAKE_CASE__ )
lowercase : int = simplified[::-1]
lowercase : list = []
for row in simplified:
lowercase : List[Any] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase : int = row.copy()[: len(SCREAMING_SNAKE_CASE__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
solutions.append(0 )
continue
lowercase : Optional[Any] = temp_row[1::]
lowercase : Dict = temp_row[::-1]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE__ ):
current_solution -= column * solutions[column_index]
solutions.append(SCREAMING_SNAKE_CASE__ )
lowercase : Any = []
for item in solutions:
final.append(float(round(SCREAMING_SNAKE_CASE__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Any = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 20 |
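The `code` cell above is an obfuscated simultaneous-equation solver: forward elimination over augmented rows followed by back-substitution, with the original identifiers replaced by placeholders such as `lowercase` and `SCREAMING_SNAKE_CASE__`. Below is a minimal independent sketch of the same algorithm for comparison; the name `solve` and the explicit pivot search are assumptions of mine, not names recovered from the sample.

```python
def solve(rows: list[list[float]]) -> list[float]:
    """Solve n linear equations given as n augmented rows of length n + 1,
    using Gaussian elimination followed by back-substitution."""
    n = len(rows)
    a = [row[:] for row in rows]  # work on a copy
    for col in range(n):
        # swap in a row with a nonzero entry in this column (partial pivoting)
        pivot = next(r for r in range(col, n) if a[r][col] != 0)
        a[col], a[pivot] = a[pivot], a[col]
        # eliminate this column from every row below the pivot
        for r in range(col + 1, n):
            factor = a[r][col] / a[col][col]
            a[r] = [x - factor * y for x, y in zip(a[r], a[col])]
    # back-substitute from the last row upward
    x = [0.0] * n
    for r in range(n - 1, -1, -1):
        known = sum(a[r][c] * x[c] for c in range(r + 1, n))
        x[r] = (a[r][-1] - known) / a[r][r]
    return x

# Same system as the sample's __main__ block; prints ~[-1.0, 0.0, 1.0, 2.0, 3.0]
print(solve([
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]))
```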
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
A_ = re.compile(r'''\s+''')
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(snake_case__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def UpperCAmelCase__ (snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Any = [len(snake_case__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(snake_case__ ), "line_max": max(snake_case__ )}
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : List[str]=5 ):
"""simple docstring"""
_snake_case : Any = ["""auto-generated""", """autogenerated""", """automatically generated"""]
_snake_case : Tuple = example["""content"""].splitlines()
for _, line in zip(range(snake_case__ ) , snake_case__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Union[str, Any]=5 , snake_case__ : Any=0.05 ):
"""simple docstring"""
_snake_case : Optional[Any] = ["""unit tests""", """test file""", """configuration file"""]
_snake_case : List[Any] = example["""content"""].splitlines()
_snake_case : Dict = 0
_snake_case : str = 0
# first test
for _, line in zip(range(snake_case__ ) , snake_case__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case : Optional[int] = example["""content"""].count("""\n""" )
_snake_case : Tuple = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[int] = ["""def """, """class """, """for """, """while """]
_snake_case : str = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : List[str]=4 ):
"""simple docstring"""
_snake_case : List[Any] = example["""content"""].splitlines()
_snake_case : str = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase__ (snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : Optional[Any] = tokenizer(example["""content"""] , truncation=snake_case__ )["""input_ids"""]
_snake_case : Optional[Any] = len(example["""content"""] ) / len(snake_case__ )
return {"ratio": ratio}
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Optional[int] = {}
results.update(get_hash(snake_case__ ) )
results.update(line_stats(snake_case__ ) )
results.update(alpha_stats(snake_case__ ) )
results.update(char_token_ratio(snake_case__ ) )
results.update(is_autogenerated(snake_case__ ) )
results.update(is_config_or_test(snake_case__ ) )
results.update(has_no_keywords(snake_case__ ) )
results.update(has_few_assignments(snake_case__ ) )
return results
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
if not check_uniques(snake_case__ , snake_case__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
with open(snake_case__ , """rb""" ) as f_in:
with gzip.open(str(snake_case__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(snake_case__ , snake_case__ )
os.unlink(snake_case__ )
# Settings
A_ = HfArgumentParser(PreprocessingArguments)
A_ = parser.parse_args()
if args.num_workers is None:
A_ = multiprocessing.cpu_count()
A_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
A_ = time.time()
A_ = load_dataset(args.dataset_name, split='''train''')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
A_ = time.time()
A_ = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
A_ = set(ds.unique('''hash'''))
A_ = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
A_ = time.time()
A_ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
A_ = time.time()
A_ , A_ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
A_ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
A_ = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
A_ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
A_ = str(data_dir / F'''file-{file_number+1:012}.json''')
A_ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 64 | 0 |
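The `style_context` cell above appears to be a CodeParrot-style preprocessing script: it MD5-hashes whitespace-stripped file contents (`hashlib.mda` is presumably the obfuscated spelling of `hashlib.md5`), keeps one example per hash, and then filters on line stats, autogeneration markers, config/test keywords, assignment counts, and character-to-token ratio. A self-contained sketch of just the exact-hash deduplication step, with names of my own choosing:

```python
import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(text: str) -> str:
    # Hash the whitespace-stripped content so trivially reformatted
    # duplicates collapse onto the same key.
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

def exact_dedup(texts):
    # Keep the first occurrence of each hash and drop the rest.
    seen = set()
    for text in texts:
        key = content_hash(text)
        if key not in seen:
            seen.add(key)
            yield text

docs = ["def f():\n    return 1", "def f():  return 1", "def g(): pass"]
print(list(exact_dedup(docs)))  # the reformatted duplicate is dropped
```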
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
if isinstance(lowerCamelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
_lowercase : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowercase : Tuple = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowercase : str = np.concatenate(lowerCamelCase_ , axis=0 )
_lowercase : Dict = np.array(lowerCamelCase_ ).astype(np.floataa ) / 2_55.0
_lowercase : Optional[int] = image.transpose(0 , 3 , 1 , 2 )
_lowercase : str = 2.0 * image - 1.0
_lowercase : Tuple = torch.from_numpy(lowerCamelCase_ )
elif isinstance(image[0] , torch.Tensor ):
_lowercase : Any = torch.cat(lowerCamelCase_ , dim=0 )
return image
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=0.99_95 ) -> Tuple:
if not isinstance(lowerCamelCase_ , np.ndarray ):
_lowercase : List[Any] = True
_lowercase : Any = va.device
_lowercase : Union[str, Any] = va.cpu().numpy()
_lowercase : int = va.cpu().numpy()
_lowercase : int = np.sum(va * va / (np.linalg.norm(lowerCamelCase_ ) * np.linalg.norm(lowerCamelCase_ )) )
if np.abs(lowerCamelCase_ ) > DOT_THRESHOLD:
_lowercase : Any = (1 - t) * va + t * va
else:
_lowercase : Dict = np.arccos(lowerCamelCase_ )
_lowercase : str = np.sin(lowerCamelCase_ )
_lowercase : int = theta_a * t
_lowercase : Dict = np.sin(lowerCamelCase_ )
_lowercase : Any = np.sin(theta_a - theta_t ) / sin_theta_a
_lowercase : List[Any] = sin_theta_t / sin_theta_a
_lowercase : Dict = sa * va + sa * va
if inputs_are_torch:
_lowercase : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
return va
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = F.normalize(lowerCamelCase_ , dim=-1 )
_lowercase : Tuple = F.normalize(lowerCamelCase_ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
for param in model.parameters():
_lowercase : Any = value
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCamelCase, text_encoder=lowerCamelCase, clip_model=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, coca_model=lowerCamelCase, coca_tokenizer=lowerCamelCase, coca_transform=lowerCamelCase, )
_lowercase : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size, lowerCamelCase)
else feature_extractor.size['shortest_edge']
)
_lowercase : Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
set_requires_grad(self.text_encoder, lowerCamelCase)
set_requires_grad(self.clip_model, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase = "auto") -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = min(int(num_inference_steps * strength), lowerCamelCase)
_lowercase : List[Any] = max(num_inference_steps - init_timestep, 0)
_lowercase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]:
"""simple docstring"""
if not isinstance(lowerCamelCase, torch.Tensor):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(lowerCamelCase)}''')
_lowercase : Any = image.to(device=lowerCamelCase, dtype=lowerCamelCase)
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(lowerCamelCase)
]
_lowercase : int = torch.cat(lowerCamelCase, dim=0)
else:
_lowercase : int = self.vae.encode(lowerCamelCase).latent_dist.sample(lowerCamelCase)
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowercase : str = 0.1_8_2_1_5 * init_latents
_lowercase : List[str] = init_latents.repeat_interleave(lowerCamelCase, dim=0)
_lowercase : List[str] = randn_tensor(init_latents.shape, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase)
# get latents
_lowercase : Any = self.scheduler.add_noise(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : str = init_latents
return latents
def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = self.coca_transform(lowerCamelCase).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
_lowercase : List[str] = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
_lowercase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split('<end_of_text>')[0].replace('<start_of_text>', '').rstrip(' .,')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Tuple = self.feature_extractor.preprocess(lowerCamelCase)
_lowercase : List[str] = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
_lowercase : int = self.clip_model.get_image_features(lowerCamelCase)
_lowercase : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCamelCase)
_lowercase : int = image_embeddings_clip.repeat_interleave(lowerCamelCase, dim=0)
return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : List[Any] = latents.detach().requires_grad_()
_lowercase : Union[str, Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Tuple = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
_lowercase : Any = self.scheduler.alphas_cumprod[timestep]
_lowercase : Any = 1 - alpha_prod_t
# compute the predicted original sample from the predicted noise, also called
# "predicted x_0" in formula (12) of https://arxiv.org/pdf/2010.02502.pdf
_lowercase : List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowercase : List[str] = torch.sqrt(lowerCamelCase)
_lowercase : Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, lowerCamelCase):
_lowercase : Dict = self.scheduler.sigmas[index]
_lowercase : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler)} not supported''')
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowercase : Dict = 1 / 0.1_8_2_1_5 * sample
_lowercase : Optional[Any] = self.vae.decode(lowerCamelCase).sample
_lowercase : int = (image / 2 + 0.5).clamp(0, 1)
_lowercase : Any = transforms.Resize(self.feature_extractor_size)(lowerCamelCase)
_lowercase : Optional[Any] = self.normalize(lowerCamelCase).to(latents.dtype)
_lowercase : List[str] = self.clip_model.get_image_features(lowerCamelCase)
_lowercase : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCamelCase)
_lowercase : Optional[Any] = spherical_dist_loss(lowerCamelCase, lowerCamelCase).mean() * clip_guidance_scale
_lowercase : str = -torch.autograd.grad(lowerCamelCase, lowerCamelCase)[0]
if isinstance(self.scheduler, lowerCamelCase):
_lowercase : Union[str, Any] = latents.detach() + grads * (sigma**2)
_lowercase : List[str] = noise_pred_original
else:
_lowercase : List[Any] = noise_pred_original - torch.sqrt(lowerCamelCase) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = 5_12, lowerCamelCase = 5_12, lowerCamelCase = 0.6, lowerCamelCase = 50, lowerCamelCase = 7.5, lowerCamelCase = 1, lowerCamelCase = 0.0, lowerCamelCase = 1_00, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, lowerCamelCase = 0.8, lowerCamelCase = 0.1, lowerCamelCase = 0.1, ) -> int:
"""simple docstring"""
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowerCamelCase)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(lowerCamelCase, torch.Generator) and batch_size > 1:
_lowercase : Dict = [generator] + [None] * (batch_size - 1)
_lowercase : Optional[int] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowercase : str = ', '.join(lowerCamelCase)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : List[Any] = self.get_image_description(lowerCamelCase)
if style_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : Dict = self.get_image_description(lowerCamelCase)
# get prompt text embeddings for content and style
_lowercase : Optional[int] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_lowercase : Union[str, Any] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : List[Any] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_lowercase : Any = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# duplicate text embeddings for each generation per prompt
_lowercase : Dict = text_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# set timesteps
_lowercase : Dict = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_offset:
_lowercase : Any = 1
self.scheduler.set_timesteps(lowerCamelCase, **lowerCamelCase)
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device)
_lowercase , _lowercase : List[Any] = self.get_timesteps(lowerCamelCase, lowerCamelCase, self.device)
_lowercase : str = timesteps[:1].repeat(lowerCamelCase)
# Preprocess image
_lowercase : str = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : int = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : Optional[int] = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
if clip_guidance_scale > 0:
_lowercase : Optional[int] = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = slerp(
lowerCamelCase, lowerCamelCase, lowerCamelCase)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase : Tuple = content_text_input.input_ids.shape[-1]
_lowercase : Union[str, Any] = self.tokenizer([''], padding='max_length', max_length=lowerCamelCase, return_tensors='pt')
_lowercase : int = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_lowercase : Union[str, Any] = uncond_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowercase : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowercase : List[Any] = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='cpu', dtype=lowerCamelCase).to(
self.device)
else:
_lowercase : Any = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_lowercase : Tuple = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowercase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowercase : Dict = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_eta:
_lowercase : List[Any] = eta
# check if the scheduler accepts generator
_lowercase : Dict = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_lowercase : str = generator
with self.progress_bar(total=lowerCamelCase):
for i, t in enumerate(lowerCamelCase):
# expand the latents if we are doing classifier free guidance
_lowercase : List[str] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowercase : List[Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
_lowercase : Dict = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowercase , _lowercase : Optional[Any] = noise_pred.chunk(2)
_lowercase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowercase : Tuple = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_lowercase , _lowercase : List[Any] = self.cond_fn(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowercase : Any = 1 / 0.1_8_2_1_5 * latents
_lowercase : List[str] = self.vae.decode(lowerCamelCase).sample
_lowercase : Tuple = (image / 2 + 0.5).clamp(0, 1)
_lowercase : List[Any] = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : List[Any] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase)
| 21 |
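The `code` cell above is a CLIP-guided stable-diffusion community pipeline. Its second module-level helper, taking `(t, v0, v1, DOT_THRESHOLD=0.9995)`, is the usual spherical linear interpolation (slerp) used to blend latents and text embeddings; it falls back to plain lerp when the inputs are nearly parallel. A cleaned-up standalone version, with variable names of my own:

```python
import numpy as np

def slerp(t: float, v0: np.ndarray, v1: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    """Spherical linear interpolation between two vectors."""
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    if np.abs(dot) > dot_threshold:
        # nearly parallel: the sin terms below are unstable, so lerp instead
        return (1 - t) * v0 + t * v1
    theta_0 = np.arccos(dot)      # angle between the inputs
    sin_theta_0 = np.sin(theta_0)
    theta_t = theta_0 * t         # angle of the interpolant
    s0 = np.sin(theta_0 - theta_t) / sin_theta_0
    s1 = np.sin(theta_t) / sin_theta_0
    return s0 * v0 + s1 * v1

a = np.array([1.0, 0.0])
b = np.array([0.0, 1.0])
print(slerp(0.5, a, b))  # ~[0.7071, 0.7071]; the interpolant stays on the unit circle
```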
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Any = ort.SessionOptions()
_snake_case : Union[str, Any] = False
return options
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_snake_case : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_snake_case : Optional[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""onnx""", safety_checker=a_, feature_extractor=a_, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Optional[Any] = """A red cat sitting on a park bench"""
_snake_case : Optional[int] = np.random.RandomState(0 )
_snake_case : Any = pipe(
prompt=a_, image=a_, mask_image=a_, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=a_, output_type="""np""", )
_snake_case : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 64 | 0 |
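The ONNX inpainting test above ends with the standard reference-image check, `np.abs(expected_image - image).max() < 1E-2`: arrays are compared elementwise and the assertion fails on the worst-case pixel. A small helper making that pattern explicit; the helper name and the default tolerance are mine:

```python
import numpy as np

def assert_images_close(actual: np.ndarray, expected: np.ndarray, atol: float = 1e-2) -> None:
    # Compare shapes first, then fail on the single worst pixel, so that one
    # badly wrong pixel is enough to flag a regression.
    assert actual.shape == expected.shape, f"shape mismatch: {actual.shape} vs {expected.shape}"
    max_diff = np.abs(expected.astype(np.float64) - actual.astype(np.float64)).max()
    assert max_diff < atol, f"max pixel difference {max_diff:.4f} exceeds {atol}"

assert_images_close(np.zeros((512, 512, 3)), np.full((512, 512, 3), 0.005))  # passes
```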
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : Union[str, Any] = LDMTextToImagePipeline
_lowerCamelCase : List[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
_lowerCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowerCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : List[Any] = False
def lowercase ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=(3_2, 6_4) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_UpperCAmelCase = CLIPTextModel(snake_case_ )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def lowercase ( self : int , snake_case_ : str , snake_case_ : Tuple=0 ):
if str(snake_case_ ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(snake_case_ )
else:
_UpperCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase ( self : Dict ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = LDMTextToImagePipeline(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_6, 1_6, 3)
_UpperCAmelCase = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowercase ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : int=torch.floataa , snake_case_ : Optional[Any]=0 ):
_UpperCAmelCase = torch.manual_seed(snake_case_ )
_UpperCAmelCase = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 3_2, 3_2) )
_UpperCAmelCase = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
_UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase ( self : List[Any] ):
_UpperCAmelCase = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_inputs(snake_case_ )
_UpperCAmelCase = pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_UpperCAmelCase = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_UpperCAmelCase = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowercase ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Dict , snake_case_ : Dict , snake_case_ : int=torch.floataa , snake_case_ : Dict=0 ):
_UpperCAmelCase = torch.manual_seed(snake_case_ )
_UpperCAmelCase = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 3_2, 3_2) )
_UpperCAmelCase = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
_UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 5_0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase ( self : Tuple ):
_UpperCAmelCase = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_inputs(snake_case_ )
_UpperCAmelCase = pipe(**snake_case_ ).images[0]
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
_UpperCAmelCase = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 22 |
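The LDM tests above branch on the device when building a seeded `torch.Generator`. A standalone sketch of that helper; the function name is mine, and the rationale in the comment is my reading of why the branch exists:

```python
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # Mirror the tests above: on "mps" seed the global RNG (device-bound
    # generators have not always been supported there), otherwise return a
    # generator bound to the target device.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(3, generator=gen))  # identical values on every run
```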
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
A_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
for attribute in key.split(""".""" ):
_snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
_snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape
else:
_snake_case : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_snake_case : int = value
elif weight_type == "weight_g":
_snake_case : str = value
elif weight_type == "weight_v":
_snake_case : Tuple = value
elif weight_type == "bias":
_snake_case : List[str] = value
else:
_snake_case : int = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : List[Any] = []
_snake_case : Optional[Any] = fairseq_model.state_dict()
_snake_case : str = hf_model.feature_extractor
# if the encoder has a different dim than the decoder -> use proj_weight
_snake_case : Optional[Any] = None
for name, value in fairseq_dict.items():
_snake_case : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , )
_snake_case : Dict = True
elif name.split(""".""" )[0] == "proj":
_snake_case : Dict = fairseq_model.proj
_snake_case : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_snake_case : Dict = True
if "*" in mapped_key:
_snake_case : Optional[int] = name.split(snake_case__ )[0].split(""".""" )[-2]
_snake_case : Union[str, Any] = mapped_key.replace("""*""" , snake_case__ )
if "weight_g" in name:
_snake_case : str = """weight_g"""
elif "weight_v" in name:
_snake_case : Optional[Any] = """weight_v"""
elif "bias" in name:
_snake_case : Union[str, Any] = """bias"""
elif "weight" in name:
_snake_case : int = """weight"""
else:
_snake_case : Optional[int] = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F"Unused weights: {unused_weights}" )
return proj_weight
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int ):
"""simple docstring"""
_snake_case : Any = full_name.split("""conv_layers.""" )[-1]
_snake_case : Optional[int] = name.split(""".""" )
_snake_case : List[str] = int(items[0] )
_snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_snake_case : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_snake_case : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_snake_case : int = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_snake_case : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
def UpperCAmelCase__ (snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case , _snake_case : Optional[Any] = emb.weight.shape
_snake_case : Optional[int] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
_snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f:
_snake_case : Any = f.readlines()
_snake_case : Optional[Any] = [line.split(""" """ )[0] for line in lines]
_snake_case : str = len(snake_case__ )
_snake_case : Tuple = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(snake_case__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
_snake_case : Optional[int] = WavaVecaConfig.from_pretrained(snake_case__ )
_snake_case : List[str] = SpeechaTextaConfig.from_pretrained(
snake_case__ , vocab_size=snake_case__ , decoder_layers=snake_case__ , do_stable_layer_norm=snake_case__ )
_snake_case : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
_snake_case , _snake_case , _snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
_snake_case : Optional[Any] = model[0].eval()
# set weights for wav2vec2 encoder
_snake_case : Any = WavaVecaModel(snake_case__ )
_snake_case : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , snake_case__ )
_snake_case : Optional[Any] = SpeechaTextaForCausalLM(snake_case__ )
_snake_case , _snake_case : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
_snake_case : Any = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
_snake_case : Any = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
_snake_case : Any = False
# add projection layer
_snake_case : int = nn.Parameter(projection_layer.weight )
_snake_case : Any = nn.Parameter(projection_layer.bias )
_snake_case : Any = create_vocab_dict(snake_case__ )
with open(os.path.join(snake_case__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(snake_case__ , snake_case__ )
_snake_case : Dict = SpeechaTextaTokenizer(os.path.join(snake_case__ , """vocab.json""" ) )
tokenizer.save_pretrained(snake_case__ )
_snake_case : str = hf_wavavec.config.to_dict()
_snake_case : List[str] = tokenizer.pad_token_id
_snake_case : Union[str, Any] = tokenizer.bos_token_id
_snake_case : Union[str, Any] = tokenizer.eos_token_id
_snake_case : Optional[Any] = """speech_to_text_2"""
_snake_case : Optional[int] = """wav2vec2"""
_snake_case : Tuple = SpeechEncoderDecoderConfig.from_dict(snake_case__ )
hf_wavavec.save_pretrained(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
A_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 64 | 0 |
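The conversion script above repeatedly applies `getattr` along dotted fairseq state-dict keys before copying weights into the Hugging Face model. A minimal sketch of that core move; the function name and the shape assertion are mine:

```python
import torch
from torch import nn

def set_by_path(root: nn.Module, dotted_key: str, value: torch.Tensor) -> None:
    # Walk a dotted path like "encoder.layers.0.attention.k_proj.weight"
    # down to the final attribute, then copy the tensor in place.
    *path, leaf = dotted_key.split(".")
    obj = root
    for attr in path:
        obj = getattr(obj, attr)
    target = getattr(obj, leaf)
    assert target.shape == value.shape, (
        f"shape mismatch for {dotted_key}: {target.shape} vs {value.shape}"
    )
    target.data.copy_(value)

layer = nn.Linear(4, 4)
set_by_path(layer, "weight", torch.zeros(4, 4))
print(layer.weight.sum())  # tensor(0., grad_fn=<SumBackward0>)
```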
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ) -> bool:
UpperCAmelCase : str = get_failure_array(_lowerCAmelCase )
# 2) Step through text searching for pattern
UpperCAmelCase , UpperCAmelCase : Optional[Any] = 0, 0 # index into text, pattern
while i < len(_lowerCAmelCase ):
if pattern[j] == text[i]:
if j == (len(_lowerCAmelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
UpperCAmelCase : Optional[Any] = failure[j - 1]
continue
i += 1
return False
def snake_case_ ( _lowerCAmelCase : str ) -> list[int]:
UpperCAmelCase : Optional[Any] = [0]
UpperCAmelCase : str = 0
UpperCAmelCase : List[str] = 1
while j < len(_lowerCAmelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
UpperCAmelCase : Union[str, Any] = failure[i - 1]
continue
j += 1
failure.append(_lowerCAmelCase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCamelCase__: str = "abc1abc12"
UpperCamelCase__: str = "alskfjaldsabc1abc1abc12k23adsfabcabc"
UpperCamelCase__: Any = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCamelCase__: Tuple = "ABABX"
UpperCamelCase__: Union[str, Any] = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
UpperCamelCase__: Any = "AAAB"
UpperCamelCase__: str = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
UpperCamelCase__: int = "abcdabcy"
UpperCamelCase__: Any = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
UpperCamelCase__: List[str] = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 23 |
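The `code` cell above is Knuth-Morris-Pratt string matching, and its heart is the failure (prefix) function: `failure[j]` is the length of the longest proper prefix of `pattern[: j + 1]` that is also a suffix of it, which lets the matcher resume after a mismatch without re-reading text characters. An equivalent compact formulation, reproducing the sample's own test vector:

```python
def failure_array(pattern: str) -> list[int]:
    failure = [0] * len(pattern)
    i = 0  # length of the currently matched prefix
    for j in range(1, len(pattern)):
        # on mismatch, fall back to the next-longest prefix that still matches
        while i > 0 and pattern[i] != pattern[j]:
            i = failure[i - 1]
        if pattern[i] == pattern[j]:
            i += 1
        failure[j] = i
    return failure

print(failure_array("aabaabaaa"))  # [0, 1, 0, 1, 2, 3, 4, 5, 2], as in the sample's test 5
```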
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ = 16
A_ = 32
def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ):
"""simple docstring"""
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_snake_case : Any = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : Any ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : List[Any] = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels', which is the column name the
# models in the transformers library expect
_snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want to pad to round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : str = 16
elif accelerator.mixed_precision != "no":
_snake_case : Optional[int] = 8
else:
_snake_case : Optional[int] = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_snake_case : Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
_snake_case : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
_snake_case : List[Any] = 2
# Initialize accelerator
_snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Tuple = config["""lr"""]
_snake_case : str = int(config["""num_epochs"""] )
_snake_case : Union[str, Any] = int(config["""seed"""] )
_snake_case : Union[str, Any] = int(config["""batch_size"""] )
_snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case__ )
def inner_training_loop(snake_case__ : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ )
_snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate scheduler
_snake_case : str = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_snake_case : int = model(**snake_case__ )
_snake_case : str = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : int = model(**snake_case__ )
_snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
_snake_case : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose """
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 """
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
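# --- Added note: a minimal, self-contained sketch of the decorator used above. ---
# `find_executable_batch_size` is a real `accelerate` utility: it calls the wrapped
# function with `starting_batch_size`, catches CUDA out-of-memory errors, halves the
# batch size, and retries. The training body below is a placeholder, not this script's.
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders/optimizer from `batch_size`; an OOM retries at 64, 32, ...
#
#   train()  # called with no arguments, exactly like `inner_training_loop()` above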
| 64 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb : np.ndarray ) -> np.ndarray:
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def gray_to_binary(gray : np.ndarray ) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation(image : np.ndarray , kernel : np.ndarray ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (the offset keeps the kernel centred on each pixel)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
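# Quick sanity check (added; assumes the padded-copy reconstruction above): dilating a
# single centre pixel with the cross-shaped structuring element grows it into a plus sign.
#   >>> dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
#   ...          np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#   array([[0, 1, 0],
#          [1, 1, 1],
#          [0, 1, 0]])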
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
| 24 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token , num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = """636036"""
    url = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["""id"""]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names , output_dir , token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports(artifact_names , output_dir , token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , F"{artifact_name}.zip" )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("""UTF-8""" )
    return results
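# Hedged usage sketch (added): the artifact name and token source are illustrative,
# not mandated by this module.
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="ci_artifacts",
#       token=os.environ.get("GITHUB_TOKEN"),
#   )
#   # `reports` maps artifact name -> {filename inside the zip -> decoded file content}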
| 64 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
UpperCAmelCase__ : int = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
UpperCAmelCase__ : List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
UpperCAmelCase__ : Union[str, Any] = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union( pred_label ,label ,num_labels ,ignore_index ,label_map = None ,reduce_labels = False ,):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label ,ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect ,bins=num_labels ,range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label ,bins=num_labels ,range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label ,bins=num_labels ,range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results ,gt_seg_maps ,num_labels ,ignore_index ,label_map = None ,reduce_labels = False ,):
    total_area_intersect = np.zeros((num_labels,) ,dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) ,dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) ,dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) ,dtype=np.float64 )
    for result, gt_seg_map in zip(results ,gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result ,gt_seg_map ,num_labels ,ignore_index ,label_map ,reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results ,gt_seg_maps ,num_labels ,ignore_index ,nan_to_num = None ,label_map = None ,reduce_labels = False ,):
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results ,gt_seg_maps ,num_labels ,ignore_index ,label_map ,reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value ,nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
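# Worked example (added) for a single 1x2 map with num_labels=2, ignore_index=255 and
# nothing actually ignored: pred = [[0, 1]], label = [[0, 0]].
# Class 0: intersect=1, pred_area=1, label_area=2, union=2 -> IoU 0.5.
# Class 1: intersect=0, pred_area=1, label_area=0, union=1 -> IoU 0.0.
# Hence mean_iou = 0.25 and overall_accuracy = 1/2 = 0.5.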
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ (datasets.Metric ):
"""simple docstring"""
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
    def _compute(self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ) -> Dict:
        """simple docstring"""
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
return iou_result
| 25 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    '''simple docstring'''
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        '''simple docstring'''
        raise NotImplementedError
    def run( self, trainer, n_trials: int, direction: str, **kwargs ):
        '''simple docstring'''
        raise NotImplementedError
    def default_hp_space( self, trial ):
        '''simple docstring'''
        raise NotImplementedError
    def ensure_available( self ):
        '''simple docstring'''
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
    @classmethod
    def pip_install( cls ):
        '''simple docstring'''
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend( HyperParamSearchBackendBase ):
    '''simple docstring'''
    name = "optuna"
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_optuna_available()
    def run( self, trainer, n_trials: int, direction: str, **kwargs ):
        '''simple docstring'''
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs )
    def default_hp_space( self, trial ):
        '''simple docstring'''
        return default_hp_space_optuna(trial )
class RayTuneBackend( HyperParamSearchBackendBase ):
    '''simple docstring'''
    name = "ray"
    pip_package = "'ray[tune]'"
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_ray_available()
    def run( self, trainer, n_trials: int, direction: str, **kwargs ):
        '''simple docstring'''
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs )
    def default_hp_space( self, trial ):
        '''simple docstring'''
        return default_hp_space_ray(trial )
class SigOptBackend( HyperParamSearchBackendBase ):
    '''simple docstring'''
    name = "sigopt"
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_sigopt_available()
    def run( self, trainer, n_trials: int, direction: str, **kwargs ):
        '''simple docstring'''
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs )
    def default_hp_space( self, trial ):
        '''simple docstring'''
        return default_hp_space_sigopt(trial )
class WandbBackend( HyperParamSearchBackendBase ):
    '''simple docstring'''
    name = "wandb"
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_wandb_available()
    def run( self, trainer, n_trials: int, direction: str, **kwargs ):
        '''simple docstring'''
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs )
    def default_hp_space( self, trial ):
        '''simple docstring'''
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend():
    """simple docstring"""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                F"{len(available_backends )} hyperparameter search backends available. Using {name} as the default." )
        return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
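# Hedged usage sketch (added): roughly how `Trainer.hyperparameter_search` resolves a
# backend, assuming at least one optional package (optuna/ray/sigopt/wandb) is installed.
#   backend_name = default_hp_search_backend()                      # e.g. "optuna"
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()               # raises with a pip hint if it is missing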
| 64 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
"7B": 11008,
"13B": 13824,
"30B": 17920,
"65B": 22016,
"70B": 28672,
}
NUM_SHARDS = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def compute_intermediate_size( n,ffn_dim_multiplier=1,multiple_of=256 ):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
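# Example (added): with the defaults ffn_dim_multiplier=1 and multiple_of=256,
# compute_intermediate_size(4096) -> 11008, matching the "7B": 11008 entry in the
# first table above: int(8 * 4096 / 3) = 10922, rounded up to a multiple of 256.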
def read_json( path ):
    with open(path,"""r""" ) as f:
        return json.load(f )
def write_json( text,path ):
    with open(path,"""w""" ) as f:
        json.dump(text,f )
def write_model( model_path,input_base_path,model_size,safe_serialization=True ):
    os.makedirs(model_path,exist_ok=True )
    tmp_model_path = os.path.join(model_path,"""tmp""" )
    os.makedirs(tmp_model_path,exist_ok=True )
    params = read_json(os.path.join(input_base_path,"""params.json""" ) )
_A : Any = NUM_SHARDS[model_size]
_A : Dict = params["""n_layers"""]
_A : Optional[Any] = params["""n_heads"""]
_A : Union[str, Any] = n_heads // num_shards
_A : Dict = params["""dim"""]
_A : Optional[Any] = dim // n_heads
_A : List[Any] = 1_00_00.0
_A : List[str] = 1.0 / (base ** (torch.arange(0,snake_case_,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_A : str = params["""n_kv_heads"""] # for GQA / MQA
_A : List[Any] = n_heads_per_shard // num_key_value_heads
_A : List[str] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_A : Any = n_heads
_A : Optional[int] = n_heads_per_shard
_A : List[Any] = dim
# permute for sliced rotary
    def permute(w,n_heads=n_heads,dim1=dim,dim2=dim ):
        return w.view(n_heads,dim1 // n_heads // 2,2,dim2 ).transpose(1,2 ).reshape(dim1,dim2 )
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_A : str = torch.load(os.path.join(snake_case_,"""consolidated.00.pth""" ),map_location="""cpu""" )
else:
# Sharded
_A : int = [
torch.load(os.path.join(snake_case_,f'''consolidated.{i:02d}.pth''' ),map_location="""cpu""" )
for i in range(snake_case_ )
]
_A : Optional[int] = 0
_A : Optional[Any] = {"""weight_map""": {}}
for layer_i in range(snake_case_ ):
_A : int = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
_A : Union[str, Any] = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_A : Any = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
_A : Union[str, Any] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(snake_case_,snake_case_,snake_case_ )
for i in range(snake_case_ )
],dim=0,).reshape(snake_case_,snake_case_ ) )
_A : Optional[Any] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
snake_case_,snake_case_,snake_case_ )
for i in range(snake_case_ )
],dim=0,).reshape(snake_case_,snake_case_ ),snake_case_,snake_case_,snake_case_,)
_A : Union[str, Any] = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
snake_case_,snake_case_,snake_case_ )
for i in range(snake_case_ )
],dim=0,).reshape(snake_case_,snake_case_ )
_A : str = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(snake_case_ )],dim=1 )
_A : Tuple = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(snake_case_ )],dim=0 )
_A : Dict = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(snake_case_ )],dim=1 )
_A : Any = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(snake_case_ )],dim=0 )
_A : Any = inv_freq
for k, v in state_dict.items():
_A : Dict = filename
param_count += v.numel()
torch.save(snake_case_,os.path.join(snake_case_,snake_case_ ) )
_A : Optional[Any] = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
_A : Optional[Any] = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_A : Optional[int] = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(snake_case_ )],dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(snake_case_ )],dim=0 ),
}
for k, v in state_dict.items():
_A : List[str] = filename
param_count += v.numel()
torch.save(snake_case_,os.path.join(snake_case_,snake_case_ ) )
# Write configs
_A : Tuple = {"""total_size""": param_count * 2}
write_json(snake_case_,os.path.join(snake_case_,"""pytorch_model.bin.index.json""" ) )
_A : Any = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_A : int = params["""multiple_of"""] if """multiple_of""" in params else 256
_A : str = LlamaConfig(
hidden_size=snake_case_,intermediate_size=compute_intermediate_size(snake_case_,snake_case_,snake_case_ ),num_attention_heads=params["""n_heads"""],num_hidden_layers=params["""n_layers"""],rms_norm_eps=params["""norm_eps"""],num_key_value_heads=snake_case_,)
config.save_pretrained(snake_case_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_A : Optional[Any] = LlamaForCausalLM.from_pretrained(snake_case_,torch_dtype=torch.floataa,low_cpu_mem_usage=snake_case_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(snake_case_,safe_serialization=snake_case_ )
shutil.rmtree(snake_case_ )
def write_tokenizer( tokenizer_path,input_tokenizer_path ):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main():
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""",help="""Location of LLaMA weights, which contains tokenizer.model and model folders""",)
parser.add_argument(
"""--model_size""",choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""],)
parser.add_argument(
"""--output_dir""",help="""Location to write HF model and tokenizer""",)
parser.add_argument("""--safe_serialization""",type=snake_case_,help="""Whether or not to save using `safetensors`.""" )
_A : Union[str, Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir,input_base_path=os.path.join(args.input_dir,args.model_size ),model_size=args.model_size,safe_serialization=args.safe_serialization,)
    spm_path = os.path.join(args.input_dir,"""tokenizer.model""" )
    write_tokenizer(args.output_dir,spm_path )
if __name__ == "__main__":
main()
| 26 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowercase( ProcessorMixin ):
'''simple docstring'''
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "AutoImageProcessor"
lowercase__ = "AutoTokenizer"
    def __init__( self: List[str], image_processor: List[str]=None, tokenizer: Tuple=None, **kwargs: Tuple ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor, tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self: Any, *args: Any, **kwargs: Tuple ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        images = kwargs.pop("""images""", None )
        text = kwargs.pop("""text""", None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self: Optional[int], *args: Tuple, **kwargs: List[str] ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self: int, *args: List[str], **kwargs: int ):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )
    @contextmanager
    def as_target_processor( self: Dict ):
        '''simple docstring'''
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson( self: Dict, tokens: Optional[Any], is_inner_value: str=False, added_vocab: Optional[Any]=None ):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"""<s_(.*?)>""", tokens, re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, """""" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"""<sep/>""" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
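    # Example (added): `tokenajson` is Donut's token-sequence -> JSON parser. For instance,
    #   processor.tokenajson("<s_menu><s_name>burger</s_name></s_menu>")
    # returns {"menu": {"name": "burger"}}; "<sep/>"-separated leaves become lists, and
    # a string with no recognized tags comes back as {"text_sequence": ...}.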
    @property
    def feature_extractor_class( self: Optional[int] ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor( self: Tuple ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", FutureWarning, )
        return self.image_processor
| 64 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch
class CTRLModelLanguageGenerationTest ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ):
        '''simple docstring'''
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 27 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors : list[float] ):
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series(resistors : list[float] ):
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"Resistor at index {index} has a negative value!"
            raise ValueError(msg )
        index += 1
    return sum_r
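# Doctest-style examples (added; parallel value rounded):
#   >>> round(resistor_parallel([3.21389, 2, 3]), 5)
#   0.87376
#   >>> resistor_series([3.21389, 2, 3])
#   8.21389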
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 0 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline ( Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self : Optional[int] , truncation : Dict=None , tokenize_kwargs : List[Any]=None , return_tensors : Any=None , **kwargs : List[str] ):
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self : List[Any] , inputs : int , **tokenize_kwargs : Union[str, Any] ) -> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self : Any , model_inputs : str ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self : Optional[int] , model_outputs : Union[str, Any] , return_tensors : List[str]=False ):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self : Tuple , *args : Tuple , **kwargs : str ):
        """simple docstring"""
        return super().__call__(*args , **kwargs )
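# Hedged usage sketch (added; the checkpoint name is an example, not fixed by this file):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased", framework="pt")
#   features = extractor("This is a test.")   # nested list indexed as [batch][token][hidden_dim]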
| 28 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class lowercase( PreTrainedTokenizerFast ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = CodeGenTokenizer
    def __init__( self: Union[str, Any], vocab_file: List[Any]=None, merges_file: str=None, tokenizer_file: str=None, unk_token: Dict="<|endoftext|>", bos_token: Tuple="<|endoftext|>", eos_token: str="<|endoftext|>", add_prefix_space: List[Any]=False, **kwargs: List[str], ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("""add_bos_token""", False ):
            model_id = kwargs.pop("""name_or_path""", """""" )
            raise ValueError(
                """Currently GPT2's fast tokenizer does NOT support adding a BOS token. """
                """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
                """ so that the fast tokenizer works correctly.""" )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self: Any, *args: Any, **kwargs: int ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""", False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus( self: Optional[Any], *args: Any, **kwargs: List[str] ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""", False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary( self: Optional[int], save_directory: str, filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def decode( self: str, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs: List[str], ):
        '''simple docstring'''
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern )
        return decoded_text
    def truncate( self: Dict, completion: Tuple, truncate_before_pattern: Optional[Any] ):
        '''simple docstring'''
        def find_re(string: Dict, pattern: str, start_pos: Union[str, Any] ):
            m = pattern.search(string, start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer("""^print""", completion, re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("""^def""", completion, re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
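    # Hedged usage sketch (added): `truncate_before_pattern` cuts generated code at the
    # first regex hit, e.g. at a fresh top-level comment, a docstring, or a blank stretch:
    #   text = tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])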
| 64 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example: Dict ):
    '''simple docstring'''
    output = {}
    output['input_ids'] = tokenizer(example['content'] , truncation=False )['input_ids']
    output['ratio_char_token'] = len(example['content'] ) / len(output['input_ids'] )
    return output
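# Example (added): a snippet whose 40 characters tokenize into 10 ids yields
# {"input_ids": [...], "ratio_char_token": 4.0}; the ratio can later be used to
# filter out files that tokenize poorly (often non-code or auto-generated text).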
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 29 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name : str ):
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [8_00, 13_33]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 3_30
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 13_20
    elif "yolos_s" in yolos_name:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [8_00, 13_44]
    config.num_labels = 91
    repo_id = """huggingface/label-files"""
    filename = """coco-detection-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict : dict , config : YolosConfig , base_model : bool = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: config.hidden_size, :]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[-config.hidden_size :, :]
_snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
if "backbone" in name:
_snake_case : str = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
_snake_case : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
_snake_case : str = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
_snake_case : str = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
_snake_case : Tuple = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_snake_case : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
_snake_case : str = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
_snake_case : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_snake_case : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_snake_case : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_snake_case : str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_snake_case : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_snake_case : int = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
_snake_case : Union[str, Any] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
_snake_case : str = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
_snake_case : Union[str, Any] = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def convert_state_dict(orig_state_dict : dict , model : YolosForObjectDetection ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
_snake_case : Optional[Any] = key.split(""".""" )
_snake_case : Optional[Any] = int(key_split[2] )
_snake_case : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_snake_case : str = val[:dim, :]
_snake_case : Optional[Any] = val[
dim : dim * 2, :
]
_snake_case : Optional[Any] = val[-dim:, :]
else:
_snake_case : Dict = val[:dim]
_snake_case : Any = val[dim : dim * 2]
_snake_case : Dict = val[-dim:]
else:
_snake_case : Tuple = val
return orig_state_dict
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ):
    """simple docstring"""
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 8_00 if yolos_name != """yolos_ti""" else 5_12
    image_processor = YolosImageProcessor(format="""coco_detection""" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
_snake_case , _snake_case : Optional[int] = outputs.logits, outputs.pred_boxes
_snake_case , _snake_case : Dict = None, None
if yolos_name == "yolos_ti":
_snake_case : Optional[Any] = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
_snake_case : Tuple = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
_snake_case : List[str] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
_snake_case : List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
_snake_case : Dict = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
_snake_case : Union[str, Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
_snake_case : Optional[Any] = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
_snake_case : int = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
_snake_case : Optional[int] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
_snake_case : Dict = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
_snake_case : str = model_mapping[yolos_name]
image_processor.push_to_hub(snake_case__ , organization="""hustvl""" )
model.push_to_hub(snake_case__ , organization="""hustvl""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
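# Example invocation (added; the script name and paths are placeholders):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small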
| 64 | 0 |
def solution( numerator: int = 3 , denominator: int = 7 , limit: int = 1_000_000 ):
    '''simple docstring'''
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
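# Quick check (added): with the Project Euler 71 toy bound, solution(3, 7, 8) == 2,
# because 2/5 is the largest fraction below 3/7 with denominator <= 8.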
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 30 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config : str , base_model : List[str]=False ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[str]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case : List[Any] = """"""
else:
_snake_case : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
_snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
_snake_case : Union[str, Any] = in_proj_bias[: config.hidden_size]
_snake_case : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_snake_case : List[str] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Union[str, Any] = val
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = ViTMSNConfig()
_snake_case : Any = 10_00
_snake_case : Tuple = """datasets/huggingface/label-files"""
_snake_case : Dict = """imagenet-1k-id2label.json"""
_snake_case : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ ) , """r""" ) )
_snake_case : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : List[Any] = idalabel
_snake_case : str = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
_snake_case : Tuple = 3_84
_snake_case : Dict = 15_36
_snake_case : Tuple = 6
elif "l16" in checkpoint_url:
_snake_case : Any = 10_24
_snake_case : int = 40_96
_snake_case : str = 24
_snake_case : Optional[int] = 16
_snake_case : List[Any] = 0.1
elif "b4" in checkpoint_url:
_snake_case : Tuple = 4
elif "l7" in checkpoint_url:
_snake_case : int = 7
_snake_case : Dict = 10_24
_snake_case : Optional[Any] = 40_96
_snake_case : Any = 24
_snake_case : Union[str, Any] = 16
_snake_case : Optional[int] = 0.1
_snake_case : int = ViTMSNModel(snake_case__ )
_snake_case : Optional[int] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )["""target_encoder"""]
_snake_case : List[str] = ViTImageProcessor(size=config.image_size )
remove_projection_head(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , base_model=snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
_snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Tuple = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
_snake_case : str = ViTImageProcessor(
size=config.image_size , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : Any = image_processor(images=snake_case__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
_snake_case : int = model(**snake_case__ )
_snake_case : List[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_snake_case : Optional[Any] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
_snake_case : str = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
_snake_case : Optional[int] = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
_snake_case : List[Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
_snake_case : Optional[int] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
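# A small standalone check of the fused-qkv split performed in read_in_q_k_v
# above: MSN/timm checkpoints store query, key and value as one matrix of shape
# (3 * hidden_size, hidden_size), and the converter slices it into three blocks.
# The hidden size here is a toy value for illustration.
import torch

hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
query = qkv_weight[:hidden, :]
key = qkv_weight[hidden : hidden * 2, :]
value = qkv_weight[-hidden:, :]
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)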
| 64 | 0 |
'''simple docstring'''
import os
import sys
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
__SCREAMING_SNAKE_CASE : List[str] = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase_ ( *_UpperCAmelCase : Dict , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return AutoConfig.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase_ ( *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : str ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase_ ( *_UpperCAmelCase : Any , **_UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase_ ( *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase_ ( *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase_ ( *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Any ) -> int:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase_ ( *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase )
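# A minimal sketch of what the add_start_docstrings decorator does for the
# wrappers above: it prepends the wrapped Auto class's docstring to the
# function's own, so help() on a wrapper shows the underlying documentation.
# The names below are illustrative stand-ins, not the transformers internals.
def _demo_add_start_docstrings(*docstr):
    def decorator(fn):
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return decorator

@_demo_add_start_docstrings("Shared preamble. ")
def _demo():
    """Function-specific docs."""

assert _demo.__doc__.startswith("Shared preamble. ")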
| 31 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[Any] = list(snake_case__ )
_snake_case : List[Any] = list(snake_case__ )
_snake_case : List[Any] = 0
for i in range(len(snake_case__ ) ):
if lista[i] != lista[i]:
count += 1
_snake_case : Any = """_"""
if count > 1:
return False
else:
return "".join(snake_case__ )
def UpperCAmelCase__ (snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : int = []
while True:
_snake_case : Union[str, Any] = ["""$"""] * len(snake_case__ )
_snake_case : int = []
for i in range(len(snake_case__ ) ):
for j in range(i + 1 , len(snake_case__ ) ):
_snake_case : List[Any] = compare_string(binary[i] , binary[j] )
if k is False:
_snake_case : Dict = """*"""
_snake_case : List[Any] = """*"""
temp.append("""X""" )
for i in range(len(snake_case__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case__ ) == 0:
return pi
_snake_case : Optional[int] = list(set(snake_case__ ) )
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Sequence[float] ):
"""simple docstring"""
_snake_case : Optional[int] = []
for minterm in minterms:
_snake_case : Any = """"""
for _ in range(snake_case__ ):
_snake_case : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case__ )
return temp
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = list(snake_case__ )
_snake_case : List[str] = list(snake_case__ )
_snake_case : Tuple = 0
for i in range(len(snake_case__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCAmelCase__ (snake_case__ : list[list[int]] , snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : Any = []
_snake_case : Union[str, Any] = [0] * len(snake_case__ )
for i in range(len(chart[0] ) ):
_snake_case : Tuple = 0
_snake_case : str = -1
for j in range(len(snake_case__ ) ):
if chart[j][i] == 1:
count += 1
_snake_case : Union[str, Any] = j
if count == 1:
_snake_case : Union[str, Any] = 1
for i in range(len(snake_case__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case__ ) ):
_snake_case : List[Any] = 0
temp.append(prime_implicants[i] )
while True:
_snake_case : Optional[int] = 0
_snake_case : str = -1
_snake_case : Any = 0
for i in range(len(snake_case__ ) ):
_snake_case : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_snake_case : Dict = count_n
_snake_case : Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case__ ) ):
_snake_case : Optional[Any] = 0
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : int = [[0 for x in range(len(snake_case__ ) )] for x in range(len(snake_case__ ) )]
for i in range(len(snake_case__ ) ):
_snake_case : Any = prime_implicants[i].count("""_""" )
for j in range(len(snake_case__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__ ):
_snake_case : Tuple = 1
return chart
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : int = int(input("""Enter the no. of variables\n""" ) )
_snake_case : List[str] = [
int(x )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_snake_case : List[str] = decimal_to_binary(snake_case__ , snake_case__ )
_snake_case : str = check(snake_case__ )
print("""Prime Implicants are:""" )
print(snake_case__ )
_snake_case : int = prime_implicant_chart(snake_case__ , snake_case__ )
_snake_case : str = selection(snake_case__ , snake_case__ )
print("""Essential Prime Implicants are:""" )
print(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
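# An independently runnable illustration of the merge rule behind the prime
# implicant search above: two binary terms combine only when they differ in
# exactly one bit, and the differing bit becomes a "don't care" underscore.
def merges_in_one_bit(a: str, b: str) -> bool:
    return sum(x != y for x, y in zip(a, b)) == 1

assert merges_in_one_bit("000", "001")      # minterms 0 and 1 combine to 00_
assert not merges_in_one_bit("000", "011")  # two bits differ, no merge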
| 64 | 0 |
import datasets
from .evaluate import evaluate
UpperCAmelCase_ : Optional[Any] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
UpperCAmelCase_ : int = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
UpperCAmelCase_ : List[Any] = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
a_ : int = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
a_ : List[Any] = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
a_ : Any = evaluate(dataset=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ )
return score
| 32 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Union[str, Any] ):
"""simple docstring"""
stooge(snake_case__ , 0 , len(snake_case__ ) - 1 )
return arr
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
_snake_case , _snake_case : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_snake_case : Dict = (h - i + 1) // 3
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(snake_case__ , i + t , (snake_case__) )
# Recursively sort the first 2/3 elements again
stooge(snake_case__ , snake_case__ , (h - t) )
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
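# A compact, independently runnable version of the same recursion for reference
# (sort the first 2/3, the last 2/3, then the first 2/3 again); stooge sort runs
# in roughly O(n^2.7) time, so it is only practical for tiny inputs.
def stooge_demo(arr, i=0, h=None):
    if h is None:
        h = len(arr) - 1
    if i >= h:
        return arr
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge_demo(arr, i, h - t)
        stooge_demo(arr, i + t, h)
        stooge_demo(arr, i, h - t)
    return arr

assert stooge_demo([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]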
| 64 | 0 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowercase ( __snake_case : int ):
random.seed(__snake_case )
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# ^^ safe to call this function even if cuda is not available
class _UpperCAmelCase :
def __init__( self : Dict , A : Iterable[torch.nn.Parameter] , A : float = 0.9999 , A : float = 0.0 , A : int = 0 , A : bool = False , A : Union[float, int] = 1.0 , A : Union[float, int] = 2 / 3 , A : Optional[Any] = None , A : Dict[str, Any] = None , **A : str , ) -> Union[str, Any]:
if isinstance(A , torch.nn.Module ):
lowercase_ : List[Any] = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , A , standard_warn=A , )
lowercase_ : int = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowercase_ : Dict = True
if kwargs.get('''max_value''' , A ) is not None:
lowercase_ : Tuple = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , A , standard_warn=A )
lowercase_ : List[str] = kwargs['''max_value''']
if kwargs.get('''min_value''' , A ) is not None:
lowercase_ : Union[str, Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , A , standard_warn=A )
lowercase_ : int = kwargs['''min_value''']
lowercase_ : Union[str, Any] = list(A )
lowercase_ : int = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , A ) is not None:
lowercase_ : Union[str, Any] = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , A , standard_warn=A )
self.to(device=kwargs['''device'''] )
lowercase_ : Optional[Any] = None
lowercase_ : Optional[int] = decay
lowercase_ : Optional[int] = min_decay
lowercase_ : List[Any] = update_after_step
lowercase_ : int = use_ema_warmup
lowercase_ : Optional[int] = inv_gamma
lowercase_ : Dict = power
lowercase_ : str = 0
lowercase_ : Any = None # set in `step()`
lowercase_ : List[str] = model_cls
lowercase_ : int = model_config
@classmethod
def A ( cls : int , A : int , A : Optional[int] ) -> "EMAModel":
lowercase_ , lowercase_ : List[str] = model_cls.load_config(A , return_unused_kwargs=A )
lowercase_ : Optional[int] = model_cls.from_pretrained(A )
lowercase_ : List[str] = cls(model.parameters() , model_cls=A , model_config=model.config )
ema_model.load_state_dict(A )
return ema_model
def A ( self : Optional[Any] , A : List[str] ) -> Any:
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
lowercase_ : Union[str, Any] = self.model_cls.from_config(self.model_config )
lowercase_ : Dict = self.state_dict()
state_dict.pop('''shadow_params''' , A )
model.register_to_config(**A )
self.copy_to(model.parameters() )
model.save_pretrained(A )
def A ( self : str , A : int ) -> float:
lowercase_ : Any = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowercase_ : Any = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowercase_ : Optional[Any] = (1 + step) / (10 + step)
lowercase_ : str = min(A , self.decay )
# make sure decay is not smaller than min_decay
lowercase_ : Any = max(A , self.min_decay )
return cur_decay_value
@torch.no_grad()
def A ( self : Optional[int] , A : Iterable[torch.nn.Parameter] ) -> Dict:
if isinstance(A , torch.nn.Module ):
lowercase_ : str = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , A , standard_warn=A , )
lowercase_ : Optional[int] = parameters.parameters()
lowercase_ : List[Any] = list(A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase_ : Union[str, Any] = self.get_decay(self.optimization_step )
lowercase_ : Tuple = decay
lowercase_ : Optional[int] = 1 - decay
lowercase_ : Optional[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
lowercase_ : Tuple = deepspeed.zero.GatheredParameters(A , modifier_rank=A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(A )
def A ( self : Tuple , A : Iterable[torch.nn.Parameter] ) -> None:
lowercase_ : int = list(A )
for s_param, param in zip(self.shadow_params , A ):
param.data.copy_(s_param.to(param.device ).data )
def A ( self : Any , A : Optional[int]=None , A : Optional[int]=None ) -> None:
lowercase_ : Union[str, Any] = [
p.to(device=A , dtype=A ) if p.is_floating_point() else p.to(device=A )
for p in self.shadow_params
]
def A ( self : Union[str, Any] ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def A ( self : Tuple , A : Iterable[torch.nn.Parameter] ) -> None:
lowercase_ : Tuple = [param.detach().cpu().clone() for param in parameters]
def A ( self : Union[str, Any] , A : Iterable[torch.nn.Parameter] ) -> None:
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , A ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowercase_ : int = None
def A ( self : Tuple , A : dict ) -> None:
lowercase_ : Dict = copy.deepcopy(A )
lowercase_ : List[Any] = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
lowercase_ : Union[str, Any] = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , A ):
raise ValueError('''Invalid min_decay''' )
lowercase_ : Any = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , A ):
raise ValueError('''Invalid optimization_step''' )
lowercase_ : Union[str, Any] = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , A ):
raise ValueError('''Invalid update_after_step''' )
lowercase_ : List[str] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , A ):
raise ValueError('''Invalid use_ema_warmup''' )
lowercase_ : Union[str, Any] = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
lowercase_ : Any = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
lowercase_ : Optional[int] = state_dict.get('''shadow_params''' , A )
if shadow_params is not None:
lowercase_ : Any = shadow_params
if not isinstance(self.shadow_params , A ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(A , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
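# A dependency-free sketch of the update applied in step() above, using a fixed
# decay for simplicity (the real class warms the decay up via get_decay):
# shadow <- shadow - (1 - decay) * (shadow - param), an exponential moving
# average of the parameters. After n steps toward a constant target the shadow
# reaches 1 - decay**n of it.
def ema_update(shadow: float, param: float, decay: float = 0.999) -> float:
    return shadow - (1.0 - decay) * (shadow - param)

shadow = 0.0
for _ in range(1000):
    shadow = ema_update(shadow, 1.0)  # parameters pinned at 1.0
assert abs(shadow - (1.0 - 0.999 ** 1000)) < 1e-9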
| 33 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["note_seq"]
def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(self, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
| 64 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class _a ( __a ):
__a : Optional[int] = """owlvit_text_model"""
def __init__( self : Tuple , lowercase : Union[str, Any]=49_408 , lowercase : str=512 , lowercase : int=2_048 , lowercase : Optional[Any]=12 , lowercase : Any=8 , lowercase : Optional[int]=16 , lowercase : Union[str, Any]="quick_gelu" , lowercase : Dict=1E-5 , lowercase : Tuple=0.0 , lowercase : str=0.02 , lowercase : Dict=1.0 , lowercase : str=0 , lowercase : List[str]=49_406 , lowercase : int=49_407 , **lowercase : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = intermediate_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_act
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = attention_dropout
UpperCAmelCase = initializer_range
UpperCAmelCase = initializer_factor
@classmethod
def A ( cls : int , lowercase : Union[str, os.PathLike] , **lowercase : List[Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(lowercase )
UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
UpperCAmelCase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase , **lowercase )
class _a ( __a ):
__a : Dict = """owlvit_vision_model"""
def __init__( self : Tuple , lowercase : str=768 , lowercase : Dict=3_072 , lowercase : int=12 , lowercase : Tuple=12 , lowercase : Optional[int]=3 , lowercase : Optional[int]=768 , lowercase : Optional[int]=32 , lowercase : Union[str, Any]="quick_gelu" , lowercase : Dict=1E-5 , lowercase : List[Any]=0.0 , lowercase : List[Any]=0.02 , lowercase : str=1.0 , **lowercase : str , ):
'''simple docstring'''
super().__init__(**lowercase )
UpperCAmelCase = hidden_size
UpperCAmelCase = intermediate_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = hidden_act
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = attention_dropout
UpperCAmelCase = initializer_range
UpperCAmelCase = initializer_factor
@classmethod
def A ( cls : Optional[Any] , lowercase : Union[str, os.PathLike] , **lowercase : int ):
'''simple docstring'''
cls._set_token_in_kwargs(lowercase )
UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
UpperCAmelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase , **lowercase )
class _a ( __a ):
__a : Dict = """owlvit"""
__a : Optional[Any] = True
def __init__( self : Tuple , lowercase : Tuple=None , lowercase : Optional[Any]=None , lowercase : List[str]=512 , lowercase : Any=2.6592 , lowercase : List[Any]=True , **lowercase : str , ):
'''simple docstring'''
super().__init__(**lowercase )
if text_config is None:
UpperCAmelCase = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
UpperCAmelCase = {}
logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
UpperCAmelCase = OwlViTTextConfig(**lowercase )
UpperCAmelCase = OwlViTVisionConfig(**lowercase )
UpperCAmelCase = projection_dim
UpperCAmelCase = logit_scale_init_value
UpperCAmelCase = return_dict
UpperCAmelCase = 1.0
@classmethod
def A ( cls : Tuple , lowercase : Union[str, os.PathLike] , **lowercase : List[str] ):
'''simple docstring'''
cls._set_token_in_kwargs(lowercase )
UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(lowercase , **lowercase )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase , **lowercase )
@classmethod
def A ( cls : List[str] , lowercase : Dict , lowercase : Dict , **lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = text_config
UpperCAmelCase = vision_config
return cls.from_dict(lowercase , **lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = self.text_config.to_dict()
UpperCAmelCase = self.vision_config.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
class _a ( __a ):
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
def A ( self : Tuple , lowercase : "ProcessorMixin" , lowercase : int = -1 , lowercase : int = -1 , lowercase : Optional["TensorType"] = None , ):
'''simple docstring'''
UpperCAmelCase = super().generate_dummy_inputs(
processor.tokenizer , batch_size=lowercase , seq_length=lowercase , framework=lowercase )
UpperCAmelCase = super().generate_dummy_inputs(
processor.image_processor , batch_size=lowercase , framework=lowercase )
return {**text_input_dict, **image_input_dict}
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 14
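# A standalone sketch of the to_dict pattern used by the composite config above:
# serialize each nested sub-config and record the top-level model_type. The
# classes below are illustrative stand-ins, not the OwlViT classes themselves.
import copy

class _SubConfig:
    def __init__(self, hidden_size):
        self.hidden_size = hidden_size
    def to_dict(self):
        return dict(self.__dict__)

class _CompositeConfig:
    model_type = "owlvit"
    def __init__(self):
        self.text_config = _SubConfig(512)
        self.vision_config = _SubConfig(768)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

d = _CompositeConfig().to_dict()
assert d["model_type"] == "owlvit" and d["text_config"]["hidden_size"] == 512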
| 34 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used for the reference check in the test function
import struct
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : int = data
_snake_case : Dict = [0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0]
@staticmethod
def UpperCamelCase_ ( a_: Optional[Any], a_: Dict ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xffffffff
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
_snake_case : Optional[int] = self.data + padding + struct.pack(""">Q""", 8 * len(self.data ) )
return padded_data
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 )
]
def UpperCamelCase_ ( self: Optional[Any], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = list(struct.unpack(""">16L""", a_ ) ) + [0] * 64
for i in range(16, 80 ):
_snake_case : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 )
return w
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.padding()
_snake_case : str = self.split_blocks()
for block in self.blocks:
_snake_case : Any = self.expand_block(a_ )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.h
for i in range(0, 80 ):
if 0 <= i < 20:
_snake_case : int = (b & c) | ((~b) & d)
_snake_case : str = 0X5a827999
elif 20 <= i < 40:
_snake_case : Optional[int] = b ^ c ^ d
_snake_case : str = 0X6ed9eba1
elif 40 <= i < 60:
_snake_case : List[Any] = (b & c) | (b & d) | (c & d)
_snake_case : List[Any] = 0X8f1bbcdc
elif 60 <= i < 80:
_snake_case : List[Any] = b ^ c ^ d
_snake_case : int = 0Xca62c1d6
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = (
self.rotate(a_, 5 ) + f + e + k + expanded_block[i] & 0Xffffffff,
a,
self.rotate(a_, 30 ),
c,
d,
)
_snake_case : Union[str, Any] = (
self.h[0] + a & 0Xffffffff,
self.h[1] + b & 0Xffffffff,
self.h[2] + c & 0Xffffffff,
self.h[3] + d & 0Xffffffff,
self.h[4] + e & 0Xffffffff,
)
return ("{:08x}" * 5).format(*self.h )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = B"""Test String"""
assert SHAaHash(snake_case__ ).final_hash() == hashlib.shaa(snake_case__ ).hexdigest() # noqa: S324
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
_snake_case : Union[str, Any] = parser.parse_args()
_snake_case : List[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
_snake_case : str = f.read()
else:
_snake_case : int = bytes(snake_case__ , """utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
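# Two quick independent checks of the building blocks above. First, the padding
# rule: append 0x80, zero-fill so the length is 56 mod 64, then the 8-byte
# big-endian bit length, giving a whole number of 64-byte blocks. Second, a
# known-answer test against hashlib's reference SHA-1 on the empty input.
import hashlib
import struct

data = b"abc"
padded = data + b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64) + struct.pack(">Q", 8 * len(data))
assert len(padded) % 64 == 0

assert hashlib.sha1(b"").hexdigest() == "da39a3ee5e6b4b0d3255bfef95601890afd80709"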
| 64 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 100 , ) -> float:
snake_case__ : Any = x_start
snake_case__ : List[Any] = fnc(_lowerCAmelCase )
snake_case__ : str = 0.0
for _ in range(_lowerCAmelCase ):
# Approximates curve as a sequence of linear lines and sums their length
snake_case__ : Tuple = (x_end - x_start) / steps + xa
snake_case__ : str = fnc(_lowerCAmelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
snake_case__ : Optional[Any] = xa
snake_case__ : Tuple = fxa
return length
if __name__ == "__main__":
def __snake_case( _lowerCAmelCase ) -> Tuple:
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
__a = 10
while i <= 10_0000:
print(F"With {i} steps: {line_length(f, -10, 10, i)}")
i *= 10
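# A quick independent check of the polyline approximation above: for the straight
# line f(x) = x on [0, 1] the exact arc length is sqrt(2), and summing segment
# hypotenuses reproduces it for any step count. Names here are illustrative.
import math

def arc_length(f, x_start, x_end, steps=100):
    x, fx, length = x_start, f(x_start), 0.0
    for _ in range(steps):
        x_next = (x_end - x_start) / steps + x
        fx_next = f(x_next)
        length += math.hypot(x_next - x, fx_next - fx)
        x, fx = x_next, fx_next
    return length

assert abs(arc_length(lambda x: x, 0.0, 1.0) - math.sqrt(2)) < 1e-9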
| 35 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`].
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(__a )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "rag"
lowercase__ = True
def __init__( self: Union[str, Any], a_: int=None, a_: Tuple=True, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, a_: Optional[Any]=" / ", a_: Tuple=" // ", a_: List[Any]=5, a_: Dict=300, a_: Tuple=768, a_: Optional[Any]=8, a_: int="wiki_dpr", a_: Any="train", a_: Optional[int]="compressed", a_: Optional[int]=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: str=False, a_: Dict=0.0, a_: Union[str, Any]=True, a_: Union[str, Any]=False, a_: str=False, a_: List[str]=False, a_: Union[str, Any]=True, a_: Any=None, **a_: List[Any], ):
'''simple docstring'''
super().__init__(
bos_token_id=a_, pad_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, forced_eos_token_id=a_, is_encoder_decoder=a_, prefix=a_, vocab_size=a_, **a_, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_snake_case : Union[str, Any] = kwargs.pop("""question_encoder""" )
_snake_case : List[str] = question_encoder_config.pop("""model_type""" )
_snake_case : Union[str, Any] = kwargs.pop("""generator""" )
_snake_case : Any = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
_snake_case : Union[str, Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Optional[Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Any = reduce_loss
_snake_case : Optional[int] = label_smoothing
_snake_case : Dict = exclude_bos_score
_snake_case : int = do_marginalize
_snake_case : Optional[Any] = title_sep
_snake_case : Any = doc_sep
_snake_case : List[str] = n_docs
_snake_case : Tuple = max_combined_length
_snake_case : Optional[Any] = dataset
_snake_case : Union[str, Any] = dataset_split
_snake_case : Tuple = index_name
_snake_case : Any = retrieval_vector_size
_snake_case : Union[str, Any] = retrieval_batch_size
_snake_case : str = passages_path
_snake_case : Tuple = index_path
_snake_case : List[Any] = use_dummy_dataset
_snake_case : Optional[Any] = output_retrieved
_snake_case : Tuple = do_deduplication
_snake_case : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_snake_case : Dict = getattr(self.generator, """forced_eos_token_id""", a_ )
@classmethod
def UpperCamelCase_ ( cls: Any, a_: PretrainedConfig, a_: PretrainedConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : List[str] = self.question_encoder.to_dict()
_snake_case : Tuple = self.generator.to_dict()
_snake_case : Dict = self.__class__.model_type
return output
| 64 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(a)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
requires_backends(self, "vision")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def snake_case__ ( self, __a=None):
'''simple docstring'''
_lowerCAmelCase : Any = {}
if top_k is not None:
_lowerCAmelCase : Dict = top_k
return {}, {}, postprocess_params
def __call__( self, __a, **__a):
'''simple docstring'''
return super().__call__(__a, **__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(__a)
_lowerCAmelCase : Tuple = self.image_processor(images=__a, return_tensors=self.framework)
return model_inputs
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model(**__a)
return model_outputs
def snake_case__ ( self, __a, __a=5):
'''simple docstring'''
if top_k > self.model.config.num_labels:
_lowerCAmelCase : Any = self.model.config.num_labels
if self.framework == "pt":
_lowerCAmelCase : int = model_outputs.logits.softmax(-1)[0]
_lowerCAmelCase , _lowerCAmelCase : Any = probs.topk(__a)
elif self.framework == "tf":
_lowerCAmelCase : Optional[Any] = stable_softmax(model_outputs.logits, axis=-1)[0]
_lowerCAmelCase : int = tf.math.top_k(__a, k=__a)
_lowerCAmelCase , _lowerCAmelCase : int = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"Unsupported framework: {self.framework}")
_lowerCAmelCase : Dict = scores.tolist()
_lowerCAmelCase : List[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__a, __a)]
| 36 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
A_ = TypeVar('''T''')
A_ = Union[List[T], Tuple[T, ...]]
A_ = Union[T, List[T], Dict[str, T]]
A_ = Union[str, bytes, os.PathLike]
| 64 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ : Union[str, Any] = """"""
else:
lowerCAmelCase__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : int = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ : List[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Dict = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : int = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Any = dct.pop(UpperCamelCase )
lowerCAmelCase__ : int = val
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase__ : List[Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = ViTConfig()
lowerCAmelCase__ : Any = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Optional[int] = int(vit_name[-12:-10] )
lowerCAmelCase__ : Dict = int(vit_name[-9:-6] )
else:
lowerCAmelCase__ : Union[str, Any] = 1000
lowerCAmelCase__ : Any = """huggingface/label-files"""
lowerCAmelCase__ : List[Any] = """imagenet-1k-id2label.json"""
lowerCAmelCase__ : Any = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase__ : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : int = idalabel
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[Any] = int(vit_name[-6:-4] )
lowerCAmelCase__ : Union[str, Any] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
lowerCAmelCase__ : List[Any] = 192
lowerCAmelCase__ : Tuple = 768
lowerCAmelCase__ : List[Any] = 12
lowerCAmelCase__ : str = 3
elif vit_name[9:].startswith("""small""" ):
lowerCAmelCase__ : List[str] = 384
lowerCAmelCase__ : List[Any] = 1536
lowerCAmelCase__ : Any = 12
lowerCAmelCase__ : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
lowerCAmelCase__ : Optional[Any] = 768
lowerCAmelCase__ : Any = 2304
lowerCAmelCase__ : Tuple = 8
lowerCAmelCase__ : str = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
lowerCAmelCase__ : Any = 1024
lowerCAmelCase__ : List[str] = 4096
lowerCAmelCase__ : Optional[Any] = 24
lowerCAmelCase__ : Optional[int] = 16
elif vit_name[4:].startswith("""huge""" ):
lowerCAmelCase__ : List[str] = 1280
lowerCAmelCase__ : int = 5120
lowerCAmelCase__ : Optional[int] = 32
lowerCAmelCase__ : List[Any] = 16
# load original model from timm
lowerCAmelCase__ : Optional[Any] = timm.create_model(UpperCamelCase , pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCamelCase )
lowerCAmelCase__ : str = create_rename_keys(UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : Union[str, Any] = ViTModel(UpperCamelCase ).eval()
else:
lowerCAmelCase__ : Any = ViTForImageClassification(UpperCamelCase ).eval()
model.load_state_dict(UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase__ : int = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase__ : Union[str, Any] = ViTImageProcessor(size=config.image_size )
lowerCAmelCase__ : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase__ : Tuple = encoding["""pixel_values"""]
lowerCAmelCase__ : str = model(UpperCamelCase )
if base_model:
lowerCAmelCase__ : int = timm_model.forward_features(UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(UpperCamelCase , outputs.pooler_output , atol=1e-3 )
else:
lowerCAmelCase__ : Optional[Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase , outputs.logits , atol=1e-3 )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
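# A quick check of the slice-based name parsing used above: the timm identifier
# encodes the patch size and image size in its final characters.
name = "vit_base_patch16_224"
assert int(name[-6:-4]) == 16   # patch size
assert int(name[-3:]) == 224    # image size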
| 37 |
"""simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, generated via Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 64 | 0 |
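Heap's algorithm must emit every permutation of the input exactly once, which gives a quick property test against the standard library; `heaps` is the function defined above:

from itertools import permutations

assert sorted(heaps([1, 2, 3, 4])) == sorted(permutations([1, 2, 3, 4]))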
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization: split the string into characters
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 38 |
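The tokenizer above is character-level: tokenization splits the string into characters, ids come from a JSON vocab, and decoding goes through the reverse decoder map. The same round trip, self-contained and independent of transformers (the toy vocab is illustrative):

vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
decoder = {v: k for k, v in vocab.items()}

def encode(text):
    return [vocab.get(ch, vocab["[GO]"]) for ch in text]  # "[GO]" doubles as unk

def decode(ids):
    return "".join(decoder[i] for i in ids)

assert decode(encode("abc")) == "abc"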
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of `n`."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Return the sum of all numbers equal to the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1  # upper bound: beyond this, the digit sum cannot keep up
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 64 | 0 |
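The two known non-trivial fixed points of this digit-factorial map are 145 and 40585, so solution() returns 40730; that makes a cheap sanity check without running the full search:

assert sum_of_digit_factorial(145) == 145  # 1! + 4! + 5! = 145
assert sum_of_digit_factorial(40585) == 40585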
def is_palindrome(head) -> bool:
    """O(1)-space check: reverse the second half in place and compare."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # terminate the first half (the comparison below works either way)
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """Stack-based check: push the second half, then pop while walking from the head."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    """Dict-based check: the positions of each value must mirror around the middle."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 39 |
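The three checks above assume a singly linked list node with `val` and `next` attributes, which the snippet never defines. A minimal node type plus a usage example; each call gets a fresh list, since is_palindrome mutates its input:

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build(values):
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head

assert is_palindrome(build([1, 2, 2, 1]))
assert is_palindrome_stack(build([1, 2, 2, 1]))
assert not is_palindrome_dict(build([1, 2, 3]))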
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any `k` consecutive elements of `array`."""
    if len(array) < k or k < 0:
        raise ValueError("""Invalid Input""")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 64 | 0 |
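A brute-force cross-check of the sliding-window implementation above, recomputing each window sum from scratch:

def max_sum_naive(array, k):
    return max(sum(array[i : i + k]) for i in range(len(array) - k + 1))

sample = [1, -2, 3, 10, -4, 7, 2]
assert max_sum_in_array(sample, 3) == max_sum_naive(sample, 3) == 13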
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # keeps the running value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees with `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees with `node_count` nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 40 |
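The closed form above can be sanity-checked against the first few Catalan numbers, and against the identity that n unlabeled nodes give catalan(n) BST shapes and catalan(n) * n! binary trees:

assert [catalan_number(n) for n in range(6)] == [1, 1, 2, 5, 14, 42]
assert binary_tree_count(3) == 5 * 6  # 5 BST shapes times 3! labelings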
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RetriBERT tokenizer backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # keep the backend normalizer in sync with the passed options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 64 | 0 |
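The pair-encoding convention implemented above is the standard BERT layout: [CLS] A [SEP] B [SEP], with segment id 0 for everything up to and including the first [SEP] and 1 afterwards. A tiny illustration with hypothetical token ids:

cls_id, sep_id = 101, 102
a, b = [7, 8], [9]
segment_ids = [0] * len([cls_id] + a + [sep_id]) + [1] * len(b + [sep_id])
assert segment_ids == [0, 0, 0, 0, 1, 1]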
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : Dict =logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")

    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = """background"""
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("""Pushing to the hub...""")
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
        help='''Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 41 |
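Example invocation of the converter above, followed by loading the exported folder; the script name and all paths below are placeholders:

#   python convert_mobilenet_v1.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf

from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("./mobilenet_v1_1.0_224_hf")
model = AutoModelForImageClassification.from_pretrained("./mobilenet_v1_1.0_224_hf")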
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ = CodeGenTokenizer
lowercase__ = CodeGenTokenizerFast
lowercase__ = True
lowercase__ = {"add_prefix_space": True}
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_snake_case : List[Any] = {"""unk_token""": """<unk>"""}
_snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def UpperCamelCase_ ( self: Any, **a_: int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Any, **a_: str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = """lower newer"""
_snake_case : Tuple = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
_snake_case : Optional[Any] = """lower newer"""
_snake_case : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_snake_case : int = tokenizer.tokenize(a_, add_prefix_space=a_ )
self.assertListEqual(a_, a_ )
_snake_case : str = tokens + [tokenizer.unk_token]
_snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : int = self.get_tokenizer()
_snake_case : int = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : Dict = """lower newer"""
# Testing tokenization
_snake_case : Dict = tokenizer.tokenize(a_, add_prefix_space=a_ )
_snake_case : List[str] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids without special tokens
_snake_case : Optional[Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : Tuple = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids with special tokens
_snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : int = tokenizer.encode(a_, add_prefix_space=a_ )
_snake_case : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
# Testing the unknown token
_snake_case : Tuple = tokens + [rust_tokenizer.unk_token]
_snake_case : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Dict, *a_: Dict, **a_: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int, a_: List[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
# Simple input
_snake_case : Any = """This is a simple input"""
_snake_case : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""")
_snake_case : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
# Pair input
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" )
# Simple input
_snake_case : List[Any] = """This is a simple input"""
_snake_case : int = ["""This is a simple input looooooooong""", """This is a simple input"""]
_snake_case : Any = ("""This is a simple input""", """This is a pair""")
_snake_case : str = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_snake_case : str = tokenizer.pad_token_id
_snake_case : Optional[int] = tokenizer(a_, padding="""max_length""", max_length=30, return_tensors="""np""" )
_snake_case : Dict = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
_snake_case : Tuple = tokenizer(*a_, padding="""max_length""", max_length=60, return_tensors="""np""" )
_snake_case : Optional[Any] = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = """$$$"""
_snake_case : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=a_, add_bos_token=a_ )
_snake_case : str = """This is a simple input"""
_snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Union[str, Any] = tokenizer.bos_token_id
_snake_case : Tuple = tokenizer(a_ )
_snake_case : Optional[Any] = tokenizer(a_ )
self.assertEqual(out_s.input_ids[0], a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_snake_case : Optional[int] = tokenizer.decode(out_s.input_ids )
_snake_case : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_snake_case : Dict = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_snake_case : Union[str, Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_snake_case : Optional[Any] = tokenizer.encode(a_ )
_snake_case : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_snake_case : Optional[Any] = tokenizer.decode(a_, truncate_before_pattern=a_ )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
| 64 | 0 |
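The last test above exercises CodeGen's decode-time truncation, which cuts the decoded string at the first match of any given regex pattern. A minimal usage sketch (downloads the checkpoint named in the test):

from transformers import CodeGenTokenizer

tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok.encode("def f():\n    return 1\n\n\n\n# trailing junk")
print(tok.decode(ids, truncate_before_pattern=["\n\n\n"]))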
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy'."""
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')

    # Response
    response = F'Your date {date_input}, is a {days[str(f)]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
    args = parser.parse_args()
zeller(args.date_input)
| 42 |
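Known dates give a quick cross-check of zeller() above; the function also self-validates against datetime internally, so a wrong result would raise rather than return:

assert "Sunday" in zeller("01-31-2010")
assert "Thursday" in zeller("07-04-1776")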
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')


def get_hash(example):
    """Get the hash of a code example, ignoring all whitespace."""
    return {"hash": hashlib.md5(re.sub(PATTERN , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def UpperCAmelCase__ (snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Any = [len(snake_case__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(snake_case__ ), "line_max": max(snake_case__ )}
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : List[str]=5 ):
"""simple docstring"""
_snake_case : Any = ["""auto-generated""", """autogenerated""", """automatically generated"""]
_snake_case : Tuple = example["""content"""].splitlines()
for _, line in zip(range(snake_case__ ) , snake_case__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Union[str, Any]=5 , snake_case__ : Any=0.05 ):
"""simple docstring"""
_snake_case : Optional[Any] = ["""unit tests""", """test file""", """configuration file"""]
_snake_case : List[Any] = example["""content"""].splitlines()
_snake_case : Dict = 0
_snake_case : str = 0
# first test
for _, line in zip(range(snake_case__ ) , snake_case__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case : Optional[int] = example["""content"""].count("""\n""" )
_snake_case : Tuple = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[int] = ["""def """, """class """, """for """, """while """]
_snake_case : str = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : List[str]=4 ):
"""simple docstring"""
_snake_case : List[Any] = example["""content"""].splitlines()
_snake_case : str = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase__ (snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : Optional[Any] = tokenizer(example["""content"""] , truncation=snake_case__ )["""input_ids"""]
_snake_case : Optional[Any] = len(example["""content"""] ) / len(snake_case__ )
return {"ratio": ratio}
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Optional[int] = {}
results.update(get_hash(snake_case__ ) )
results.update(line_stats(snake_case__ ) )
results.update(alpha_stats(snake_case__ ) )
results.update(char_token_ratio(snake_case__ ) )
results.update(is_autogenerated(snake_case__ ) )
results.update(is_config_or_test(snake_case__ ) )
results.update(has_no_keywords(snake_case__ ) )
results.update(has_few_assignments(snake_case__ ) )
return results
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
if not check_uniques(snake_case__ , snake_case__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
with open(snake_case__ , """rb""" ) as f_in:
with gzip.open(str(snake_case__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(snake_case__ , snake_case__ )
os.unlink(snake_case__ )
# Settings
A_ = HfArgumentParser(PreprocessingArguments)
A_ = parser.parse_args()
if args.num_workers is None:
A_ = multiprocessing.cpu_count()
A_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
A_ = time.time()
A_ = load_dataset(args.dataset_name, split='''train''')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
A_ = time.time()
A_ = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
A_ = set(ds.unique('''hash'''))
A_ = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
A_ = time.time()
A_ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
A_ = time.time()
A_ , A_ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
A_ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
A_ = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
A_ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
A_ = str(data_dir / F'''file-{file_number+1:012}.json''')
A_ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 64 | 0 |
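Exact deduplication above keys on an MD5 of whitespace-stripped file content; a self-contained sketch of that idea:

import hashlib
import re

def content_hash(text: str) -> str:
    return hashlib.md5(re.sub(r"\s+", "", text).encode("utf-8")).hexdigest()

seen = set()

def is_first_occurrence(text: str) -> bool:
    h = content_hash(text)
    if h in seen:
        return False
    seen.add(h)
    return True

assert is_first_occurrence("def f():\n    pass")
assert not is_first_occurrence("def f():    pass")  # same text modulo whitespace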
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    """Draw one triangle outline, then recurse on its three half-scale corners."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 43 |
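Example run, assuming the script above is saved as fractals.py; depth d draws 3**d smallest triangles, so the total number of outlines grows geometrically:

#   python fractals.py 5

# Total outlines drawn at depth d: 1 + 3 + ... + 3**d
total = lambda d: (3 ** (d + 1) - 1) // 2
assert total(2) == 13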
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_snake_case : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_snake_case : Optional[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""onnx""", safety_checker=a_, feature_extractor=a_, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Optional[Any] = """A red cat sitting on a park bench"""
_snake_case : Optional[int] = np.random.RandomState(0 )
_snake_case : Any = pipe(
prompt=a_, image=a_, mask_image=a_, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=a_, output_type="""np""", )
_snake_case : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 64 | 0 |
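The CUDA provider tuple pinned in the test above also works for a plain ONNX Runtime session; a hedged sketch with a placeholder model path (enable_mem_pattern mirrors the options object built in the test):

# import onnxruntime as ort
# options = ort.SessionOptions()
# options.enable_mem_pattern = False
# sess = ort.InferenceSession(
#     "model.onnx",
#     sess_options=options,
#     providers=[("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000",
#                                           "arena_extend_strategy": "kSameAsRequested"})],
# )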
"""simple docstring"""
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to each element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(img: np.ndarray, spatial_variance: float, intensity_variance: float, kernel_size: int,) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else """../image_data/lena.jpg"""
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('input image', img)

    out = img / 255
    out = out.astype('float32')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('output image', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 44 |
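A self-contained smoke test of bilateral_filter above on a synthetic noisy image, with no file or GUI I/O:

import numpy as np

noisy = np.clip(np.eye(32) + 0.1 * np.random.randn(32, 32), 0, 1).astype("float32")
smoothed = bilateral_filter(noisy, 1.0, 1.0, 5)
assert smoothed.shape == noisy.shape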
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
A_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
for attribute in key.split(""".""" ):
_snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
_snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape
else:
_snake_case : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_snake_case : int = value
elif weight_type == "weight_g":
_snake_case : str = value
elif weight_type == "weight_v":
_snake_case : Tuple = value
elif weight_type == "bias":
_snake_case : List[str] = value
else:
_snake_case : int = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : List[Any] = []
_snake_case : Optional[Any] = fairseq_model.state_dict()
_snake_case : str = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_snake_case : Optional[Any] = None
for name, value in fairseq_dict.items():
_snake_case : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , )
_snake_case : Dict = True
elif name.split(""".""" )[0] == "proj":
_snake_case : Dict = fairseq_model.proj
_snake_case : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_snake_case : Dict = True
if "*" in mapped_key:
_snake_case : Optional[int] = name.split(snake_case__ )[0].split(""".""" )[-2]
_snake_case : Union[str, Any] = mapped_key.replace("""*""" , snake_case__ )
if "weight_g" in name:
_snake_case : str = """weight_g"""
elif "weight_v" in name:
_snake_case : Optional[Any] = """weight_v"""
elif "bias" in name:
_snake_case : Union[str, Any] = """bias"""
elif "weight" in name:
_snake_case : int = """weight"""
else:
_snake_case : Optional[int] = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F"Unused weights: {unused_weights}" )
return proj_weight
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int ):
"""simple docstring"""
_snake_case : Any = full_name.split("""conv_layers.""" )[-1]
_snake_case : Optional[int] = name.split(""".""" )
_snake_case : List[str] = int(items[0] )
_snake_case : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_snake_case : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_snake_case : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_snake_case : int = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_snake_case : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
def UpperCAmelCase__ (snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case , _snake_case : Optional[Any] = emb.weight.shape
_snake_case : Optional[int] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
_snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f:
_snake_case : Any = f.readlines()
_snake_case : Optional[Any] = [line.split(""" """ )[0] for line in lines]
_snake_case : str = len(snake_case__ )
_snake_case : Tuple = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(snake_case__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
_snake_case : Optional[int] = WavaVecaConfig.from_pretrained(snake_case__ )
_snake_case : List[str] = SpeechaTextaConfig.from_pretrained(
snake_case__ , vocab_size=snake_case__ , decoder_layers=snake_case__ , do_stable_layer_norm=snake_case__ )
_snake_case : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
_snake_case , _snake_case , _snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
_snake_case : Optional[Any] = model[0].eval()
# set weights for wav2vec2 encoder
_snake_case : Any = WavaVecaModel(snake_case__ )
_snake_case : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , snake_case__ )
_snake_case : Optional[Any] = SpeechaTextaForCausalLM(snake_case__ )
_snake_case , _snake_case : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
_snake_case : Any = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
_snake_case : Any = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
_snake_case : Any = False
# add projection layer
_snake_case : int = nn.Parameter(projection_layer.weight )
_snake_case : Any = nn.Parameter(projection_layer.bias )
_snake_case : Any = create_vocab_dict(snake_case__ )
with open(os.path.join(snake_case__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(snake_case__ , snake_case__ )
_snake_case : Dict = SpeechaTextaTokenizer(os.path.join(snake_case__ , """vocab.json""" ) )
tokenizer.save_pretrained(snake_case__ )
_snake_case : str = hf_wavavec.config.to_dict()
_snake_case : List[str] = tokenizer.pad_token_id
_snake_case : Union[str, Any] = tokenizer.bos_token_id
_snake_case : Union[str, Any] = tokenizer.eos_token_id
_snake_case : Optional[Any] = """speech_to_text_2"""
_snake_case : Optional[int] = """wav2vec2"""
_snake_case : Tuple = SpeechEncoderDecoderConfig.from_dict(snake_case__ )
hf_wavavec.save_pretrained(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
A_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 64 | 0 |
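Example invocation of the wav2vec2-to-SpeechEncoderDecoder converter above, with placeholder script name and paths, followed by loading the exported folder:

#   python convert_wav2vec2_seq2seq.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.txt \
#       --pytorch_dump_folder_path ./s2t-wav2vec2

from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_pretrained("./s2t-wav2vec2")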
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 45 |
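Rough usage of the text-guided inpainting pipeline above: a CLIPSeg model turns the text query into the mask that the Stable Diffusion inpainter then fills. The checkpoint names below follow the usual public ones but should be treated as assumptions:

# from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
# from diffusers import DiffusionPipeline
#
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-inpainting",
#     custom_pipeline="text_inpainting",
#     segmentation_model=model,
#     segmentation_processor=processor,
# )
# image = pipe(image=init_image, text="a glass", prompt="a cup of coffee").images[0]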
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ = 16
A_ = 32
def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ):
"""simple docstring"""
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_snake_case : Any = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : Any ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : List[Any] = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : str = 16
elif accelerator.mixed_precision != "no":
_snake_case : Optional[int] = 8
else:
_snake_case : Optional[int] = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_snake_case : Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
_snake_case : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
_snake_case : List[Any] = 2
# Initialize accelerator
_snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Tuple = config["""lr"""]
_snake_case : str = int(config["""num_epochs"""] )
_snake_case : Union[str, Any] = int(config["""seed"""] )
_snake_case : Union[str, Any] = int(config["""batch_size"""] )
_snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
    # We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case__ )
def inner_training_loop(snake_case__ : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case__ )
        # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ )
_snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate scheduler
_snake_case : str = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_snake_case : int = model(**snake_case__ )
_snake_case : str = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : int = model(**snake_case__ )
_snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
_snake_case : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_snake_case : Dict = parser.parse_args()
_snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 64 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase = BertConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(F'Building PyTorch model from configuration: {config}' )
lowerCAmelCase = BertForPreTraining(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_bert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
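A minimal sketch of calling the converter directly instead of via the CLI; all three paths are hypothetical placeholders:

convert_tf_checkpoint_to_pytorch(
    "/models/bert/model.ckpt",  # TensorFlow checkpoint prefix (hypothetical)
    "/models/bert/bert_config.json",  # config from the original BERT repo (hypothetical)
    "/models/bert/pytorch_model.bin",  # output file (hypothetical)
)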
| 46 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Any=7 ):
"""simple docstring"""
_snake_case : Any = None
if token is not None:
_snake_case : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
# The id of a workflow (not of a workflow run)
_snake_case : List[str] = """636036"""
_snake_case : Union[str, Any] = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
_snake_case : str = requests.get(snake_case__ , headers=snake_case__ ).json()
return result["workflow_runs"]
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : str = get_daily_ci_runs(snake_case__ )
_snake_case : str = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_snake_case : List[str] = workflow_run["""id"""]
break
return workflow_run_id
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Optional[Any] = get_last_daily_ci_runs(snake_case__ )
if workflow_run_id is not None:
_snake_case : Optional[Any] = get_artifacts_links(worflow_run_id=snake_case__ , token=snake_case__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_snake_case : Optional[int] = artifacts_links[artifact_name]
download_artifact(
artifact_name=snake_case__ , artifact_url=snake_case__ , output_dir=snake_case__ , token=snake_case__ )
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int ):
"""simple docstring"""
get_last_daily_ci_artifacts(snake_case__ , snake_case__ , snake_case__ )
_snake_case : int = {}
for artifact_name in artifact_names:
_snake_case : int = os.path.join(snake_case__ , F"{artifact_name}.zip" )
if os.path.isfile(snake_case__ ):
_snake_case : Tuple = {}
with zipfile.ZipFile(snake_case__ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case__ ):
# read the file
with z.open(snake_case__ ) as f:
_snake_case : Any = f.read().decode("""UTF-8""" )
return results
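A minimal standalone sketch of the artifact-reading step above, assuming a local `ci_artifacts.zip` exists:

import zipfile

with zipfile.ZipFile("ci_artifacts.zip") as z:
    contents = {
        name: z.read(name).decode("UTF-8")
        for name in z.namelist()
        if not name.endswith("/")  # skip directory entries
    }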
| 64 | 0 |
'''simple docstring'''
import torch
from transformers import AutoModel
class A__ ( torch.nn.Module ):
def __init__( self : List[Any] , _a : Union[str, Any]="sayef/fsner-bert-base-uncased" ) -> List[str]:
'''simple docstring'''
        super().__init__()
_SCREAMING_SNAKE_CASE =AutoModel.from_pretrained(_a , return_dict=_a )
_SCREAMING_SNAKE_CASE =torch.nn.CosineSimilarity(3 , 1e-08 )
_SCREAMING_SNAKE_CASE =torch.nn.Softmax(dim=1 )
def A ( self : int , **_a : str ) -> Union[str, Any]:
'''simple docstring'''
return self.bert(**_a ).last_hidden_state
def A ( self : str , _a : int ) -> Optional[Any]:
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=_a )
    def A ( self : Dict , q : List[str] , S : Dict , T : Dict=1 ) -> Optional[int]:
        '''simple docstring'''
        return self.softmax(T * self.cos(q , S ) )
    def A ( self : Optional[Any] , W_query : List[str] , W_supports : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =W_supports['sizes'].tolist()
_SCREAMING_SNAKE_CASE =W_supports['start_token_id'].item()
_SCREAMING_SNAKE_CASE =W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
        _SCREAMING_SNAKE_CASE =self.BERT(**W_query )
        _SCREAMING_SNAKE_CASE =self.BERT(**W_supports )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =W_supports['input_ids'] == start_token_id
_SCREAMING_SNAKE_CASE =W_supports['input_ids'] == end_token_id
for i, size in enumerate(_a ):
if i == 0:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =support_sizes[i - 1]
_SCREAMING_SNAKE_CASE =S[s : s + size][start_token_masks[s : s + size]]
_SCREAMING_SNAKE_CASE =S[s : s + size][end_token_masks[s : s + size]]
_SCREAMING_SNAKE_CASE =torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_SCREAMING_SNAKE_CASE =torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_SCREAMING_SNAKE_CASE =torch.vstack((p_starts, p_start) )
_SCREAMING_SNAKE_CASE =torch.vstack((p_ends, p_end) )
else:
_SCREAMING_SNAKE_CASE =p_start
_SCREAMING_SNAKE_CASE =p_end
return p_starts, p_ends
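A conceptual sketch of the scoring loop above: each query token embedding is matched against the support set's start (or end) token embeddings via a matmul, summed over supports, and softmax-normalized into a per-token probability. The shapes here are hypothetical:

import torch

q = torch.randn(38, 768)       # one query sentence, 38 token embeddings (hypothetical sizes)
s_start = torch.randn(5, 768)  # embeddings at the supports' start-token positions (hypothetical)

p_start = torch.matmul(q, s_start.T).sum(1).softmax(0)
print(p_start.shape)  # torch.Size([38]) -- one start probability per query token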
| 47 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
A_ = logging.get_logger(__name__)
class lowercase:
'''simple docstring'''
lowercase__ = 42
lowercase__ = None
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Tuple, a_: int, a_: int, a_: str, **a_: Dict ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Union[str, Any], a_: List[str] ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def UpperCamelCase_ ( cls: Tuple ):
'''simple docstring'''
return f"`pip install {cls.pip_package or cls.name}`"
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "optuna"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_optuna_available()
def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: int, a_: str, **a_: List[str] ):
'''simple docstring'''
return run_hp_search_optuna(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Any ):
'''simple docstring'''
return default_hp_space_optuna(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "ray"
lowercase__ = "'ray[tune]'"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_ray_available()
def UpperCamelCase_ ( self: int, a_: Optional[Any], a_: int, a_: str, **a_: List[Any] ):
'''simple docstring'''
return run_hp_search_ray(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: Tuple ):
'''simple docstring'''
return default_hp_space_ray(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "sigopt"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_sigopt_available()
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: str, **a_: int ):
'''simple docstring'''
return run_hp_search_sigopt(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: List[str] ):
'''simple docstring'''
return default_hp_space_sigopt(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "wandb"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_wandb_available()
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: str, **a_: Union[str, Any] ):
'''simple docstring'''
return run_hp_search_wandb(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: Any ):
'''simple docstring'''
return default_hp_space_wandb(a_ )
A_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(snake_case__ ) > 0:
_snake_case : Any = available_backends[0].name
if len(snake_case__ ) > 1:
logger.info(
F"{len(snake_case__ )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
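A generic sketch (outside of transformers) of the availability-probe pattern this module uses: each backend advertises a name and an `is_available` check, and the default is simply the first installed one:

class Backend:
    name = "base"

    @staticmethod
    def is_available():
        return False

class OptunaLikeBackend(Backend):
    name = "optuna"

    @staticmethod
    def is_available():
        try:
            import optuna  # noqa: F401
            return True
        except ImportError:
            return False

available = [b for b in (OptunaLikeBackend,) if b.is_available()]
default_backend = available[0].name if available else None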
| 64 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
set_seed(770)
SCREAMING_SNAKE_CASE__ : Tuple = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
SCREAMING_SNAKE_CASE__ : List[Any] = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.dirname(os.path.abspath(__file__))
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(os.path.expanduser('~'), '.cache')
SCREAMING_SNAKE_CASE__ : str = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any:
lowerCamelCase : List[str] = model_type
if use_small:
key += "_small"
return os.path.join(_SCREAMING_SNAKE_CASE ,REMOTE_MODEL_PATHS[key]["file_name"] )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
os.makedirs(_SCREAMING_SNAKE_CASE ,exist_ok=_SCREAMING_SNAKE_CASE )
hf_hub_download(repo_id=_SCREAMING_SNAKE_CASE ,filename=_SCREAMING_SNAKE_CASE ,local_dir=_SCREAMING_SNAKE_CASE )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE="text" ) -> Optional[int]:
if model_type == "text":
lowerCamelCase : Optional[int] = BarkSemanticModel
lowerCamelCase : int = BarkSemanticConfig
lowerCamelCase : Any = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCamelCase : Optional[Any] = BarkCoarseModel
lowerCamelCase : List[str] = BarkCoarseConfig
lowerCamelCase : str = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCamelCase : Any = BarkFineModel
lowerCamelCase : List[Any] = BarkFineConfig
lowerCamelCase : Union[str, Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCamelCase : int = f'''{model_type}_small''' if use_small else model_type
lowerCamelCase : str = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["repo_id"] ,model_info["file_name"] )
lowerCamelCase : Tuple = torch.load(_SCREAMING_SNAKE_CASE ,map_location=_SCREAMING_SNAKE_CASE )
# this is a hack
lowerCamelCase : List[Any] = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
lowerCamelCase : Optional[int] = model_args["vocab_size"]
lowerCamelCase : Dict = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCamelCase : Union[str, Any] = model_args.pop("n_head" )
lowerCamelCase : List[Any] = model_args.pop("n_embd" )
lowerCamelCase : List[Any] = model_args.pop("n_layer" )
lowerCamelCase : int = ConfigClass(**checkpoint["model_args"] )
lowerCamelCase : Optional[Any] = ModelClass(config=_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = GenerationConfigClass()
lowerCamelCase : Dict = model_generation_config
lowerCamelCase : Optional[Any] = checkpoint["model"]
# fixup checkpoint
lowerCamelCase : List[str] = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(_SCREAMING_SNAKE_CASE ):
# replace part of the key with corresponding layer name in HF implementation
lowerCamelCase : Union[str, Any] = k[len(_SCREAMING_SNAKE_CASE ) :]
for old_layer_name in new_layer_name_dict:
lowerCamelCase : List[Any] = new_k.replace(_SCREAMING_SNAKE_CASE ,new_layer_name_dict[old_layer_name] )
lowerCamelCase : int = state_dict.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCamelCase : Dict = {k for k in extra_keys if not k.endswith(".attn.bias" )}
lowerCamelCase : Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCamelCase : Tuple = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(_SCREAMING_SNAKE_CASE ,strict=_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = checkpoint["best_val_loss"].item()
logger.info(f'''model loaded: {round(n_params/1e6 ,1 )}M params, {round(_SCREAMING_SNAKE_CASE ,3 )} loss''' )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
del checkpoint, state_dict
return model
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE="text" ) -> Optional[int]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCamelCase : Optional[Any] = "cpu" # do conversion on cpu
lowerCamelCase : Tuple = _get_ckpt_path(_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = _load_model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,model_type=_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE )
# load bark initial model
lowerCamelCase : Optional[int] = _bark_load_model(_SCREAMING_SNAKE_CASE ,"cpu" ,model_type=_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE )
if model_type == "text":
lowerCamelCase : int = bark_model["model"]
if model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
lowerCamelCase : Optional[int] = 5
lowerCamelCase : int = 10
if model_type in ["text", "coarse"]:
lowerCamelCase : Union[str, Any] = torch.randint(256 ,(batch_size, sequence_length) ,dtype=torch.int )
lowerCamelCase : Tuple = bark_model(_SCREAMING_SNAKE_CASE )[0]
lowerCamelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
# take last logits
lowerCamelCase : str = output_new_model_total.logits[:, [-1], :]
else:
lowerCamelCase : str = 3
lowerCamelCase : Union[str, Any] = 8
lowerCamelCase : Optional[int] = torch.randint(256 ,(batch_size, sequence_length, n_codes_total) ,dtype=torch.int )
lowerCamelCase : int = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = bark_model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[str]:
lowerCamelCase : Dict = os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = BarkSemanticConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) )
lowerCamelCase : Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) )
lowerCamelCase : Tuple = BarkFineConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) )
lowerCamelCase : Tuple = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
lowerCamelCase : str = BarkSemanticModel.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = BarkCoarseModel.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = BarkFineModel.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_24khz" )
lowerCamelCase : Union[str, Any] = BarkConfig.from_sub_model_configs(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config ,coarseAcoustic.generation_config ,fineAcoustic.generation_config )
lowerCamelCase : List[str] = BarkModel(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = semantic
lowerCamelCase : Optional[Any] = coarseAcoustic
lowerCamelCase : Union[str, Any] = fineAcoustic
lowerCamelCase : str = codec
lowerCamelCase : Tuple = bark_generation_config
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
bark.save_pretrained(_SCREAMING_SNAKE_CASE ,repo_id=_SCREAMING_SNAKE_CASE ,push_to_hub=_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
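A minimal sketch of the checkpoint-key remapping step used in the loading function above, applied to a toy state dict; the replacements come from the rename table at the top of the file:

new_layer_name_dict = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
state_dict = {"_orig_mod.transformer.h.0.attn.c_attn.weight": "W"}

unwanted_prefix = "_orig_mod."
for k in list(state_dict):
    if k.startswith(unwanted_prefix):
        new_k = k[len(unwanted_prefix):]
        for old_name, new_name in new_layer_name_dict.items():
            new_k = new_k.replace(old_name, new_name)
        state_dict[new_k] = state_dict.pop(k)

print(state_dict)  # {'layers.0.attn.att_proj.weight': 'W'}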
| 48 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "AutoImageProcessor"
lowercase__ = "AutoTokenizer"
def __init__( self: List[str], a_: List[str]=None, a_: Tuple=None, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""", a_, )
_snake_case : str = kwargs.pop("""feature_extractor""" )
_snake_case : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a_, a_ )
_snake_case : Dict = self.image_processor
_snake_case : Any = False
def __call__( self: Any, *a_: Any, **a_: Tuple ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*a_, **a_ )
_snake_case : Dict = kwargs.pop("""images""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""text""", a_ )
if len(a_ ) > 0:
_snake_case : Optional[int] = args[0]
_snake_case : Tuple = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_snake_case : Tuple = self.image_processor(a_, *a_, **a_ )
if text is not None:
_snake_case : Tuple = self.tokenizer(a_, **a_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
_snake_case : List[str] = encodings["""input_ids"""]
return inputs
def UpperCamelCase_ ( self: Optional[int], *a_: Tuple, **a_: List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_, **a_ )
def UpperCamelCase_ ( self: int, *a_: List[str], **a_: int ):
'''simple docstring'''
return self.tokenizer.decode(*a_, **a_ )
@contextmanager
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_snake_case : Any = True
_snake_case : Optional[int] = self.tokenizer
yield
_snake_case : int = self.image_processor
_snake_case : Optional[int] = False
def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: str=False, a_: Optional[Any]=None ):
'''simple docstring'''
if added_vocab is None:
_snake_case : Dict = self.tokenizer.get_added_vocab()
_snake_case : str = {}
while tokens:
_snake_case : Union[str, Any] = re.search(r"""<s_(.*?)>""", a_, re.IGNORECASE )
if start_token is None:
break
_snake_case : List[Any] = start_token.group(1 )
_snake_case : str = re.search(rf"</s_{key}>", a_, re.IGNORECASE )
_snake_case : Dict = start_token.group()
if end_token is None:
_snake_case : List[Any] = tokens.replace(a_, """""" )
else:
_snake_case : List[str] = end_token.group()
_snake_case : str = re.escape(a_ )
_snake_case : str = re.escape(a_ )
_snake_case : Union[str, Any] = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", a_, re.IGNORECASE )
if content is not None:
_snake_case : int = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_snake_case : List[Any] = self.tokenajson(a_, is_inner_value=a_, added_vocab=a_ )
if value:
if len(a_ ) == 1:
_snake_case : List[str] = value[0]
_snake_case : List[str] = value
else: # leaf nodes
_snake_case : Tuple = []
for leaf in content.split(r"""<sep/>""" ):
_snake_case : Tuple = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_snake_case : int = leaf[1:-2] # for categorical special tokens
output[key].append(a_ )
if len(output[key] ) == 1:
_snake_case : int = output[key][0]
_snake_case : Any = tokens[tokens.find(a_ ) + len(a_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=a_, added_vocab=a_ )
if len(a_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, )
return self.image_processor_class
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, )
return self.image_processor
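The `tokenajson` method above (named `token2json` in the upstream Donut processor) parses Donut-style `<s_key>...</s_key>` tag sequences into nested dictionaries. A hedged sketch of the expected behavior, for a hypothetical `processor` instance:

sequence = "<s_menu><s_name>Pizza</s_name><s_price>$10</s_price></s_menu>"
parsed = processor.tokenajson(sequence)
print(parsed)  # {'menu': {'name': 'Pizza', 'price': '$10'}}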
| 64 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = mock.Mock()
__a = 500
__a = {}
__a = HTTPError
__a = {}
# Download this model to make sure it's in the cache.
__a = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__SCREAMING_SNAKE_CASE) as mock_head:
__a = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = mock.Mock()
__a = 500
__a = {}
__a = HTTPError
__a = {}
# Download this model to make sure it's in the cache.
__a = GPTaTokenizerFast.from_pretrained('''gpt2''')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__SCREAMING_SNAKE_CASE) as mock_head:
__a = GPTaTokenizerFast.from_pretrained('''gpt2''')
        # This checks that we did call the fake head request
mock_head.assert_called()
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
try:
__a = tempfile.mktemp()
with open(__SCREAMING_SNAKE_CASE , '''wb''') as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , __SCREAMING_SNAKE_CASE)
__a = AlbertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE)
finally:
os.remove(__SCREAMING_SNAKE_CASE)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json'''):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''') as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , __SCREAMING_SNAKE_CASE)
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''')
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''')
@is_staging_test
class _A ( unittest.TestCase ):
UpperCamelCase__ : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _lowerCamelCase ( cls : List[Any]):
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Optional[Any]):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''')
except HTTPError:
pass
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''')
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__a = BertTokenizer(__SCREAMING_SNAKE_CASE)
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token)
__a = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE , repo_id='''test-tokenizer''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token)
__a = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''')
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__a = BertTokenizer(__SCREAMING_SNAKE_CASE)
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token)
__a = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token)
__a = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def _lowerCamelCase ( self : Any):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''')
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__a = CustomTokenizer(__SCREAMING_SNAKE_CASE)
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token)
__a = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=__SCREAMING_SNAKE_CASE)
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''')
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__a = BertTokenizerFast.from_pretrained(__SCREAMING_SNAKE_CASE)
bert_tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = CustomTokenizerFast.from_pretrained(__SCREAMING_SNAKE_CASE)
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token)
__a = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=__SCREAMING_SNAKE_CASE)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''')
__a = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''')
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = Trie()
trie.add('''Hello 友達''')
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}})
trie.add('''Hello''')
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}})
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''') , ['''[CLS] This is a extra_id_100'''])
trie.add('''[CLS]''')
trie.add('''extra_id_1''')
trie.add('''extra_id_100''')
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''') , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''])
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = Trie()
trie.add('''A''')
self.assertEqual(trie.split('''ABC''') , ['''A''', '''BC'''])
self.assertEqual(trie.split('''BCA''') , ['''BC''', '''A'''])
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = Trie()
trie.add('''TOKEN]''')
trie.add('''[SPECIAL_TOKEN]''')
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''') , ['''This is something ''', '''[SPECIAL_TOKEN]'''])
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = Trie()
trie.add('''A''')
trie.add('''P''')
trie.add('''[SPECIAL_TOKEN]''')
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''') , ['''This is something ''', '''[SPECIAL_TOKEN]'''])
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = Trie()
trie.add('''AB''')
trie.add('''B''')
trie.add('''C''')
self.assertEqual(trie.split('''ABC''') , ['''AB''', '''C'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
trie.add('''ABC''')
trie.add('''B''')
trie.add('''CD''')
self.assertEqual(trie.split('''ABCD''') , ['''ABC''', '''D'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
__a = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3])
self.assertEqual(__SCREAMING_SNAKE_CASE , ['''AB''', '''C'''])
| 49 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : list[float] ):
"""simple docstring"""
_snake_case : int = 0.00
_snake_case : int = 0
for resistor in resistors:
if resistor <= 0:
_snake_case : Dict = F"Resistor at index {index} has a negative or zero value!"
raise ValueError(snake_case__ )
first_sum += 1 / float(snake_case__ )
index += 1
return 1 / first_sum
def UpperCAmelCase__ (snake_case__ : list[float] ):
"""simple docstring"""
_snake_case : Union[str, Any] = 0.00
_snake_case : Any = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_snake_case : Any = F"Resistor at index {index} has a negative value!"
raise ValueError(snake_case__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
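A worked example for the two helpers above, which implement the standard formulas 1/R_eq = sum(1/R_i) for parallel resistors and R_eq = sum(R_i) for series resistors:

resistors = [2.0, 4.0, 4.0]
r_parallel = 1 / sum(1 / r for r in resistors)  # 1 / (0.5 + 0.25 + 0.25) = 1.0 ohm
r_series = sum(resistors)                       # 10.0 ohms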
| 64 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_UpperCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : int , **UpperCAmelCase : Any ) -> Tuple:
super().__init__(**UpperCAmelCase )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
# No specific FOR_XXX available yet
def __call__( self : Any , UpperCAmelCase : Union[np.ndarray, bytes, str] , **UpperCAmelCase : Dict ) -> Tuple:
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Any , **UpperCAmelCase : Tuple ) -> Optional[int]:
lowerCamelCase__ : List[str] = {}
if "candidate_labels" in kwargs:
lowerCamelCase__ : str = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
lowerCamelCase__ : List[str] = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def A_ ( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]="This is a sound of {}." ) -> List[str]:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
lowerCamelCase__ : Dict = requests.get(UpperCAmelCase ).content
else:
with open(UpperCAmelCase , 'rb' ) as f:
lowerCamelCase__ : List[Any] = f.read()
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : List[Any] = ffmpeg_read(UpperCAmelCase , self.feature_extractor.sampling_rate )
if not isinstance(UpperCAmelCase , np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
lowerCamelCase__ : Optional[int] = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
lowerCamelCase__ : Dict = candidate_labels
lowerCamelCase__ : Any = [hypothesis_template.format(UpperCAmelCase ) for x in candidate_labels]
lowerCamelCase__ : Optional[int] = self.tokenizer(UpperCAmelCase , return_tensors=self.framework , padding=UpperCAmelCase )
lowerCamelCase__ : Dict = [text_inputs]
return inputs
def A_ ( self : Any , UpperCAmelCase : Tuple ) -> List[str]:
lowerCamelCase__ : str = model_inputs.pop('candidate_labels' )
lowerCamelCase__ : Dict = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCAmelCase ):
lowerCamelCase__ : str = text_inputs[0]
else:
# Batching case.
lowerCamelCase__ : Tuple = text_inputs[0][0]
lowerCamelCase__ : int = self.model(**UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def A_ ( self : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
lowerCamelCase__ : Optional[int] = model_outputs.pop('candidate_labels' )
lowerCamelCase__ : List[Any] = model_outputs['logits'][0]
if self.framework == "pt":
lowerCamelCase__ : Optional[int] = logits.softmax(dim=0 )
lowerCamelCase__ : Tuple = probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
lowerCamelCase__ : Any = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(UpperCAmelCase , UpperCAmelCase ) , key=lambda x : -x[0] )
]
return result
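A minimal usage sketch via the pipeline factory; the CLAP checkpoint name and the audio file are assumptions, not taken from this file:

from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")  # assumed checkpoint
result = classifier(
    "dog_bark.wav",  # hypothetical local file; URLs and 1-D numpy arrays also work, per the code above
    candidate_labels=["dog barking", "vacuum cleaner"],
)
print(result)  # e.g. [{'score': 0.99, 'label': 'dog barking'}, {'score': 0.01, 'label': 'vacuum cleaner'}]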
| 50 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = CodeGenTokenizer
def __init__( self: Union[str, Any], a_: List[Any]=None, a_: str=None, a_: str=None, a_: Dict="<|endoftext|>", a_: Tuple="<|endoftext|>", a_: str="<|endoftext|>", a_: List[Any]=False, **a_: List[str], ):
'''simple docstring'''
super().__init__(
a_, a_, tokenizer_file=a_, unk_token=a_, bos_token=a_, eos_token=a_, add_prefix_space=a_, **a_, )
if kwargs.pop("""add_bos_token""", a_ ):
_snake_case : str = kwargs.pop("""name_or_path""", """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
_snake_case : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space:
_snake_case : Dict = getattr(a_, pre_tok_state.pop("""type""" ) )
_snake_case : Dict = add_prefix_space
_snake_case : str = pre_tok_class(**a_ )
_snake_case : List[Any] = add_prefix_space
def UpperCamelCase_ ( self: Any, *a_: Any, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Optional[Any], *a_: Any, **a_: List[str] ):
'''simple docstring'''
_snake_case : Dict = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : List[Any] = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
def UpperCamelCase_ ( self: str, a_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], a_: bool = False, a_: bool = None, a_: Optional[List[str]] = None, **a_: List[str], ):
'''simple docstring'''
_snake_case : Any = super().decode(
token_ids=a_, skip_special_tokens=a_, clean_up_tokenization_spaces=a_, **a_, )
if truncate_before_pattern is not None and len(a_ ) > 0:
_snake_case : List[str] = self.truncate(a_, a_ )
return decoded_text
def UpperCamelCase_ ( self: Dict, a_: Tuple, a_: Optional[Any] ):
'''simple docstring'''
def find_re(a_: Dict, a_: str, a_: Union[str, Any] ):
_snake_case : Any = pattern.search(a_, a_ )
return m.start() if m else -1
_snake_case : Tuple = [re.compile(a_, re.MULTILINE ) for pattern in truncate_before_pattern]
_snake_case : List[Any] = list(re.finditer("""^print""", a_, re.MULTILINE ) )
if len(a_ ) > 1:
_snake_case : int = completion[: prints[1].start()]
_snake_case : List[str] = list(re.finditer("""^def""", a_, re.MULTILINE ) )
if len(a_ ) > 1:
_snake_case : List[Any] = completion[: defs[1].start()]
_snake_case : int = 0
_snake_case : List[Any] = [
pos for pos in [find_re(a_, a_, a_ ) for terminal in terminals] if pos != -1
]
if len(a_ ) > 0:
return completion[: min(a_ )]
else:
return completion
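A minimal sketch of the `truncate_before_pattern` hook above: everything from the first line matching one of the regexes onward is dropped from the decoded text. The pattern choice here is illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
completion = "def add(a, b):\n    return a + b\n\nprint(add(1, 2))\n"
ids = tokenizer(completion).input_ids
print(tokenizer.decode(ids, truncate_before_pattern=["^print"]))
# def add(a, b):
#     return a + b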
| 64 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def A () -> Dict:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__A ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def A () -> List[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def A () -> List[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__A ):
http_head('''https://huggingface.co''' )
| 51 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_snake_case : Tuple = 1_92
_snake_case : Any = 7_68
_snake_case : Any = 12
_snake_case : List[Any] = 3
_snake_case : int = [8_00, 13_33]
_snake_case : Tuple = False
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = 3_30
_snake_case : List[str] = 14
_snake_case : List[str] = 6
_snake_case : Union[str, Any] = 13_20
elif "yolos_s" in yolos_name:
_snake_case : Union[str, Any] = 3_84
_snake_case : List[str] = 15_36
_snake_case : Any = 12
_snake_case : Optional[int] = 6
elif "yolos_b" in yolos_name:
_snake_case : Dict = [8_00, 13_44]
_snake_case : str = 91
_snake_case : Optional[Any] = """huggingface/label-files"""
_snake_case : str = """coco-detection-id2label.json"""
_snake_case : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : List[str] = idalabel
_snake_case : List[str] = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosConfig , snake_case__ : bool = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: config.hidden_size, :]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[-config.hidden_size :, :]
_snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
if "backbone" in name:
_snake_case : str = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
_snake_case : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
_snake_case : str = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
_snake_case : str = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
_snake_case : Tuple = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_snake_case : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
_snake_case : str = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
_snake_case : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_snake_case : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_snake_case : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_snake_case : str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_snake_case : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_snake_case : int = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
_snake_case : Union[str, Any] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
_snake_case : str = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
_snake_case : Union[str, Any] = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosForObjectDetection ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_snake_case : List[str] = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
_snake_case : Optional[Any] = key.split(""".""" )
_snake_case : Optional[Any] = int(key_split[2] )
_snake_case : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_snake_case : str = val[:dim, :]
_snake_case : Optional[Any] = val[
dim : dim * 2, :
]
_snake_case : Optional[Any] = val[-dim:, :]
else:
_snake_case : Dict = val[:dim]
_snake_case : Any = val[dim : dim * 2]
_snake_case : Dict = val[-dim:]
else:
_snake_case : Tuple = val
return orig_state_dict
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
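# Example invocation (added; paths are hypothetical, the model names match the choices above):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small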
| 64 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
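# Usage note (added): assuming this file sits next to test_feature_extraction_clap.py inside
# a package (the relative import above requires that), the suite can be run with e.g.
# `python -m pytest path/to/this_file.py -q`.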
| 52 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    # Build the list of (src, dest) key pairs mapping the MSN checkpoint to HF ViT names
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
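# Illustrative sample (added) of the (src, dest) pairs produced above, e.g. for layer 0:
#   ("module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")
# With base_model=True the leading "vit." is stripped from each destination key.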
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        in_proj_weight_q = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight_q
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # Drop the projection-head parameters, which are not part of the HF ViT-MSN model
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
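# Example invocation (added; the URL matches the argparse default above, the output path
# is hypothetical):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small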
| 64 | 0 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores start/end token positions of entity spans."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
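# Input sketch (added; an assumption inferred from the tensor ops above): W_query is a
# tokenizer batch for the query sentences, and W_supports is a tokenizer batch for the
# support sentences augmented with "sizes" (supports per query) plus "start_token_id" /
# "end_token_id" identifying the special tokens that delimit entity spans.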
| 53 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two minterm strings if they differ in at most one position
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pass: pick implicants that are the only cover for some minterm column
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy pass: repeatedly take the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 64 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 54 |
"""simple docstring"""
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
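# Worked example (added): stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].
# With T(n) = 3 * T(2n / 3), the runtime is O(n^(log 3 / log 1.5)) ~ O(n^2.71),
# so this is a teaching curiosity rather than a practical sort.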
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 64 | 0 |
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 55 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 64 | 0 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 56 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Container for the SHA-1 hashing pipeline."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
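# Known-answer check (FIPS 180-1 test vector, stated here for reference):
#   SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"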
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 64 | 0 |
"""simple docstring"""
def optimal_merge_pattern(files: list) -> float:
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
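# Worked example (added): for files [2, 3, 4] the two smallest (2, 3) merge first at cost 5,
# then (5, 4) at cost 9, so optimal_merge_pattern([2, 3, 4]) == 14.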
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 64 | 0 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
            logger.warning(
                """Potential NSFW content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
            logger.warning(
                """Potential watermarked content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)
        return images, nsfw_detected, watermark_detected
| 58 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
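# Usage sketch (added, illustrative): these aliases let signatures accept flexible inputs,
# e.g. `def load(paths: NestedDataStructureLike[PathLike]) -> None: ...` covers a single
# path, a list of paths, or a dict of named paths.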
| 64 | 0 |
class Node:
    # Binary search tree node
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 59 |
"""simple docstring"""
def heaps(arr: list) -> list:
    # Heap's algorithm: generate all permutations of arr
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
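# Worked example (added): heaps([1, 2, 3]) returns all 3! = 6 permutations,
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]; Heap's algorithm
# reaches each successive permutation with a single swap.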
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 64 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 60 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    # Sum of the factorials of the digits of n
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    # Upper bound: a number with more than 7 digits cannot equal its digit-factorial sum
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
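# Context (added): 145 = 1! + 4! + 5! and 40585 are the only numbers equal to the sum of
# the factorials of their digits, so solution() evaluates to 145 + 40585 = 40730.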
if __name__ == "__main__":
print(F'''{solution() = }''')
| 64 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
UpperCAmelCase_ : Optional[int] = ShapEPipeline.from_pretrained("openai/shap-e" )
UpperCAmelCase_ : Optional[Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCAmelCase_ : Any = pipe(
"a shark" , generator=lowercase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 61 |
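The Shap-E tests above construct their random generator per device: on "mps" they fall back to the global torch.manual_seed, since device-bound generators are not supported there, while every other device gets a seeded device-local torch.Generator. A minimal sketch of that dispatch (the helper name is mine, not the source's):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS lacks per-device Generator support, hence the global-seed fallback.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu")
assert torch.randn(2, generator=gen).shape == (2,)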
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : int ):
"""simple docstring"""
if len(snake_case__ ) < k or k < 0:
raise ValueError("""Invalid Input""" )
_snake_case : Optional[int] = sum(array[:k] )
for i in range(len(snake_case__ ) - k ):
_snake_case : Optional[Any] = current_sum - array[i] + array[i + k]
_snake_case : List[str] = max(snake_case__ , snake_case__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
A_ = [randint(-10_00, 10_00) for i in range(1_00)]
A_ = randint(0, 1_10)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 64 | 0 |
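The row above computes the maximum sum of k consecutive elements with a sliding window: seed the window with the first k elements, then advance one position at a time, subtracting the element that leaves and adding the one that enters, so the whole scan is O(n) instead of the O(n*k) cost of re-summing every window. A sketch of the same idea with descriptive names (the names are mine, not the source's):

def max_sum_in_window(values: list[int], k: int) -> int:
    if len(values) < k or k < 0:
        raise ValueError("Invalid Input")
    window_sum = sum(values[:k])  # seed: sum of the first k elements
    best = window_sum
    for i in range(len(values) - k):
        window_sum = window_sum - values[i] + values[i + k]  # drop one, admit one
        best = max(best, window_sum)
    return best

assert max_sum_in_window([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39  # window [4, 2, 10, 23]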
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
__UpperCamelCase =tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCamelCase =TextStreamer(A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCamelCase =cs.out[:-1]
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
__UpperCamelCase =tokenizer.decode(greedy_ids[0] )
__UpperCamelCase =TextIteratorStreamer(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__UpperCamelCase =Thread(target=model.generate , kwargs=A_ )
thread.start()
__UpperCamelCase =''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
__UpperCamelCase =greedy_ids[:, input_ids.shape[1] :]
__UpperCamelCase =tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCamelCase =TextStreamer(A_ , skip_prompt=A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCamelCase =cs.out[:-1]
self.assertEqual(A_ , A_ )
def _a ( self ) -> Any:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCamelCase =AutoTokenizer.from_pretrained('distilgpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =torch.ones((1, 5) , device=A_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCamelCase =TextStreamer(A_ , skip_special_tokens=A_ )
model.generate(A_ , max_new_tokens=1 , do_sample=A_ , streamer=A_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCamelCase =cs.out[:-1] # Remove the final "\n"
__UpperCamelCase =tokenizer(A_ , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self ) -> Tuple:
__UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(A_ )
__UpperCamelCase =-1
__UpperCamelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
__UpperCamelCase =TextIteratorStreamer(A_ , timeout=0.001 )
__UpperCamelCase ={'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__UpperCamelCase =Thread(target=model.generate , kwargs=A_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A_ ):
__UpperCamelCase =''
for new_text in streamer:
streamer_text += new_text
| 62 |
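The streamer tests above exercise two modes: TextStreamer prints decoded tokens to stdout as generate() produces them, while TextIteratorStreamer hands them to the consumer through a blocking iterator, which is why generation must run in a background thread. A condensed usage sketch outside the test harness (same tiny test checkpoint as above; the prompt and token budget are illustrative):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

# skip_prompt=True drops the echoed prompt; timeout makes the iterator raise Empty if generation stalls.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=30.0)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
for chunk in streamer:  # yields decoded text pieces as they become available
    print(chunk, end="")
thread.join()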
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''yjernite/retribert-base-uncased''': 5_12,
}
A_ = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = RetriBertTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self: int, a_: int=None, a_: Dict=None, a_: Any=True, a_: int="[UNK]", a_: Any="[SEP]", a_: List[Any]="[PAD]", a_: List[Any]="[CLS]", a_: str="[MASK]", a_: Dict=True, a_: Optional[int]=None, **a_: Tuple, ):
'''simple docstring'''
super().__init__(
a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, )
_snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", a_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", a_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", a_ ) != tokenize_chinese_chars
):
_snake_case : Dict = getattr(a_, normalizer_state.pop("""type""" ) )
_snake_case : List[Any] = do_lower_case
_snake_case : List[str] = strip_accents
_snake_case : Tuple = tokenize_chinese_chars
_snake_case : Tuple = normalizer_class(**a_ )
_snake_case : List[str] = do_lower_case
def UpperCamelCase_ ( self: Any, a_: str, a_: Optional[int]=None ):
'''simple docstring'''
_snake_case : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self: List[str], a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
| 64 | 0 |
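The two helpers above implement the standard BERT-style packing that RetriBert inherits: a single sequence becomes [CLS] A [SEP] with all-zero segment ids, and a pair becomes [CLS] A [SEP] B [SEP] with zeros over the first segment and ones over the second. A framework-free sketch of that layout (the token ids and helper name are made up for illustration):

CLS, SEP = 101, 102  # illustrative ids, not the real vocabulary

def pack_pair(tokens_a: list[int], tokens_b: list[int] | None = None):
    input_ids = [CLS] + tokens_a + [SEP]
    token_type_ids = [0] * len(input_ids)  # segment 0 covers [CLS] A [SEP]
    if tokens_b is not None:
        input_ids += tokens_b + [SEP]
        token_type_ids += [1] * (len(tokens_b) + 1)  # segment 1 covers B [SEP]
    return input_ids, token_type_ids

ids, segments = pack_pair([7, 8, 9], [4, 5])
assert ids == [101, 7, 8, 9, 102, 4, 5, 102]
assert segments == [0, 0, 0, 0, 0, 1, 1, 1]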
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__a =(
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__a =False
__a =False
def UpperCamelCase__ ( self : str , __a : Tuple , __a : Optional[Any] , __a : List[str]=False ):
_a = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class in get_values(__a ):
_a = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , __a : int , __a : Union[str, Any]=13 , __a : int=7 , __a : List[str]=True , __a : str=True , __a : Union[str, Any]=True , __a : Union[str, Any]=True , __a : Tuple=99 , __a : Any=32 , __a : Tuple=32 , __a : Any=2 , __a : int=4 , __a : Dict=37 , __a : Union[str, Any]="gelu" , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : Dict=5_12 , __a : int=16 , __a : str=2 , __a : Tuple=0.02 , __a : Any=3 , __a : Tuple=4 , __a : Any=None , ):
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
_a = embedding_size
def UpperCamelCase__ ( self : Any ):
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self : Optional[int] , __a : Optional[Any] , __a : int , __a : Tuple , __a : str , __a : List[Any] , __a : List[Any] , __a : str ):
_a = TFMobileBertModel(config=__a )
_a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_a = model(__a )
_a = [input_ids, input_mask]
_a = model(__a )
_a = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self : List[str] , __a : List[str] , __a : Dict , __a : Tuple , __a : Union[str, Any] , __a : Dict , __a : List[Any] , __a : List[str] ):
_a = TFMobileBertForMaskedLM(config=__a )
_a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self : Union[str, Any] , __a : Dict , __a : Dict , __a : Any , __a : Any , __a : str , __a : Tuple , __a : str ):
_a = TFMobileBertForNextSentencePrediction(config=__a )
_a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self : List[Any] , __a : Tuple , __a : Optional[Any] , __a : List[Any] , __a : List[Any] , __a : List[str] , __a : Optional[Any] , __a : List[Any] ):
_a = TFMobileBertForPreTraining(config=__a )
_a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_a = model(__a )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self : List[str] , __a : List[Any] , __a : str , __a : str , __a : Union[str, Any] , __a : List[Any] , __a : List[Any] , __a : List[Any] ):
_a = self.num_labels
_a = TFMobileBertForSequenceClassification(config=__a )
_a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self : Any , __a : str , __a : Any , __a : Optional[int] , __a : int , __a : Optional[int] , __a : int , __a : List[Any] ):
_a = self.num_choices
_a = TFMobileBertForMultipleChoice(config=__a )
_a = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
_a = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
_a = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
_a = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self : List[str] , __a : List[str] , __a : List[Any] , __a : Dict , __a : Dict , __a : int , __a : str , __a : List[str] ):
_a = self.num_labels
_a = TFMobileBertForTokenClassification(config=__a )
_a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self : Optional[Any] , __a : Tuple , __a : List[Any] , __a : str , __a : int , __a : Any , __a : int , __a : List[str] ):
_a = TFMobileBertForQuestionAnswering(config=__a )
_a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_a = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self : Any ):
_a = self.prepare_config_and_inputs()
( _a , _a , _a , _a , _a , _a , _a , ) = config_and_inputs
_a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def UpperCamelCase__ ( self : str ):
_a = TFMobileBertModelTest.TFMobileBertModelTester(self )
_a = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : List[str] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a )
def UpperCamelCase__ ( self : Any ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a )
def UpperCamelCase__ ( self : List[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a )
def UpperCamelCase__ ( self : List[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a )
def UpperCamelCase__ ( self : Any ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a )
@slow
def UpperCamelCase__ ( self : List[str] ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_a = TFMobileBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self : Optional[Any] ):
_a = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
_a = tf.constant([[0, 1, 2, 3, 4, 5]] )
_a = model(__a )[0]
_a = [1, 6, 3_05_22]
self.assertEqual(output.shape , __a )
_a = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 63 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = CodeGenTokenizer
lowercase__ = CodeGenTokenizerFast
lowercase__ = True
lowercase__ = {"add_prefix_space": True}
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_snake_case : List[Any] = {"""unk_token""": """<unk>"""}
_snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def UpperCamelCase_ ( self: Any, **a_: int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Any, **a_: str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = """lower newer"""
_snake_case : Tuple = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
_snake_case : Optional[Any] = """lower newer"""
_snake_case : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_snake_case : int = tokenizer.tokenize(a_, add_prefix_space=a_ )
self.assertListEqual(a_, a_ )
_snake_case : str = tokens + [tokenizer.unk_token]
_snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : int = self.get_tokenizer()
_snake_case : int = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : Dict = """lower newer"""
# Testing tokenization
_snake_case : Dict = tokenizer.tokenize(a_, add_prefix_space=a_ )
_snake_case : List[str] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids without special tokens
_snake_case : Optional[Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : Tuple = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids with special tokens
_snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : int = tokenizer.encode(a_, add_prefix_space=a_ )
_snake_case : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
# Testing the unknown token
_snake_case : Tuple = tokens + [rust_tokenizer.unk_token]
_snake_case : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Dict, *a_: Dict, **a_: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int, a_: List[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
# Simple input
_snake_case : Any = """This is a simple input"""
_snake_case : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""")
_snake_case : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
# Pair input
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" )
# Simple input
_snake_case : List[Any] = """This is a simple input"""
_snake_case : int = ["""This is a simple input looooooooong""", """This is a simple input"""]
_snake_case : Any = ("""This is a simple input""", """This is a pair""")
_snake_case : str = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_snake_case : str = tokenizer.pad_token_id
_snake_case : Optional[int] = tokenizer(a_, padding="""max_length""", max_length=30, return_tensors="""np""" )
_snake_case : Dict = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
_snake_case : Tuple = tokenizer(*a_, padding="""max_length""", max_length=60, return_tensors="""np""" )
_snake_case : Optional[Any] = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = """$$$"""
_snake_case : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=a_, add_bos_token=a_ )
_snake_case : str = """This is a simple input"""
_snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Union[str, Any] = tokenizer.bos_token_id
_snake_case : Tuple = tokenizer(a_ )
_snake_case : Optional[Any] = tokenizer(a_ )
self.assertEqual(out_s.input_ids[0], a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_snake_case : Optional[int] = tokenizer.decode(out_s.input_ids )
_snake_case : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_snake_case : Dict = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_snake_case : Union[str, Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_snake_case : Optional[Any] = tokenizer.encode(a_ )
_snake_case : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_snake_case : Optional[Any] = tokenizer.decode(a_, truncate_before_pattern=a_ )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
| 64 | 0 |
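The slow test above leans on CodeGen's truncate_before_pattern decoding option: the decoded completion is cut at the earliest match of any of the supplied regexes (comment markers, string delimiters, runs of blank lines), which is how stray trailing code is stripped from completions. The core idea reduces to finding the earliest match and slicing; a sketch that mirrors the intent, not the library's exact implementation (the helper name is mine):

import re

def truncate_before(text: str, patterns: list[str]) -> str:
    # Cut at the earliest match of any pattern; return unchanged if none match.
    starts = []
    for pattern in patterns:
        match = re.search(pattern, text, re.MULTILINE)
        if match:
            starts.append(match.start())
    return text[: min(starts)] if starts else text

completion = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n# stray comment"
print(truncate_before(completion, [r"^#", r"\n\n\n"]))  # keeps only the if/else block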
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class A :
def __init__(self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Any=1_3 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Tuple=2_4 , __UpperCAmelCase : List[str]=1_6 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Dict=3_2 , __UpperCAmelCase : str=5 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : Tuple=3_7 , __UpperCAmelCase : int="gelu" , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[int]=1_0 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Tuple=2 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = max_length
UpperCAmelCase__ = num_mel_bins
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = frequency_stride
UpperCAmelCase__ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase__ = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase__ = frequency_out_dimension * time_out_dimension
UpperCAmelCase__ = num_patches + 2
def lowercase_ (self : str ) -> str:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, input_values, labels
def lowercase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = ASTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) = config_and_inputs
UpperCAmelCase__ = {"input_values": input_values}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : List[str] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Any = False
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowercase_ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = ASTModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def lowercase_ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
pass
def lowercase_ (self : int ) -> Any:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def lowercase_ (self : str ) -> str:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__UpperCAmelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["input_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowercase_ (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@slow
def lowercase_ (self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = ASTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset" )
UpperCAmelCase__ , UpperCAmelCase__ = torchaudio.load(__A )
return audio, sampling_rate
@require_torch
@require_torchaudio
class A ( unittest.TestCase ):
@cached_property
def lowercase_ (self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def lowercase_ (self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.default_feature_extractor
UpperCAmelCase__ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__UpperCAmelCase )
UpperCAmelCase__ = self.default_feature_extractor
UpperCAmelCase__ , UpperCAmelCase__ = prepare_audio()
UpperCAmelCase__ = audio.squeeze().numpy()
UpperCAmelCase__ = feature_extractor(__UpperCAmelCase , sampling_rate=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__UpperCAmelCase )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
UpperCAmelCase__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 65 |
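The model tester above derives the expected sequence length from the spectrogram geometry: the patch embedding slides a patch_size window with the given strides over the (max_length, num_mel_bins) spectrogram, and two extra positions are reserved for the [CLS] and distillation tokens. Spelled out with the tester's default values:

patch_size, max_length, num_mel_bins = 2, 24, 16
frequency_stride, time_stride = 2, 2

# Standard sliding-window output-size formula along each axis.
freq_out = (num_mel_bins - patch_size) // frequency_stride + 1  # (16 - 2) // 2 + 1 = 8
time_out = (max_length - patch_size) // time_stride + 1         # (24 - 2) // 2 + 1 = 12
num_patches = freq_out * time_out                               # 96
seq_length = num_patches + 2                                    # + [CLS] and distillation tokens
assert seq_length == 98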
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
A_ = re.compile(r'''\s+''')
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(snake_case__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def UpperCAmelCase__ (snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Any = [len(snake_case__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(snake_case__ ), "line_max": max(snake_case__ )}
def UpperCAmelCase__ (snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : List[str]=5 ):
"""simple docstring"""
_snake_case : Any = ["""auto-generated""", """autogenerated""", """automatically generated"""]
_snake_case : Tuple = example["""content"""].splitlines()
for _, line in zip(range(snake_case__ ) , snake_case__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Union[str, Any]=5 , snake_case__ : Any=0.05 ):
"""simple docstring"""
_snake_case : Optional[Any] = ["""unit tests""", """test file""", """configuration file"""]
_snake_case : List[Any] = example["""content"""].splitlines()
_snake_case : Dict = 0
_snake_case : str = 0
# first test
for _, line in zip(range(snake_case__ ) , snake_case__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case : Optional[int] = example["""content"""].count("""\n""" )
_snake_case : Tuple = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[int] = ["""def """, """class """, """for """, """while """]
_snake_case : str = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : List[str]=4 ):
"""simple docstring"""
_snake_case : List[Any] = example["""content"""].splitlines()
_snake_case : str = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase__ (snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : Optional[Any] = tokenizer(example["""content"""] , truncation=snake_case__ )["""input_ids"""]
_snake_case : Optional[Any] = len(example["""content"""] ) / len(snake_case__ )
return {"ratio": ratio}
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Optional[int] = {}
results.update(get_hash(snake_case__ ) )
results.update(line_stats(snake_case__ ) )
results.update(alpha_stats(snake_case__ ) )
results.update(char_token_ratio(snake_case__ ) )
results.update(is_autogenerated(snake_case__ ) )
results.update(is_config_or_test(snake_case__ ) )
results.update(has_no_keywords(snake_case__ ) )
results.update(has_few_assignments(snake_case__ ) )
return results
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
if not check_uniques(snake_case__ , snake_case__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
with open(snake_case__ , """rb""" ) as f_in:
with gzip.open(str(snake_case__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(snake_case__ , snake_case__ )
os.unlink(snake_case__ )
# Settings
A_ = HfArgumentParser(PreprocessingArguments)
A_ = parser.parse_args()
if args.num_workers is None:
A_ = multiprocessing.cpu_count()
A_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
A_ = time.time()
A_ = load_dataset(args.dataset_name, split='''train''')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
A_ = time.time()
A_ = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
A_ = set(ds.unique('''hash'''))
A_ = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
A_ = time.time()
A_ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
A_ = time.time()
A_ , A_ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
A_ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
A_ = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
A_ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
A_ = str(data_dir / F'''file-{file_number+1:012}.json''')
A_ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 64 | 0 |
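The preprocessing row above deduplicates exactly by hashing each file's content with MD5 after stripping all whitespace, so trivially reformatted copies collide; check_uniques then pops each hash from the set, meaning only the first occurrence of a hash survives the filter. The same exact-dedup core on a toy corpus (names are mine; the membership test is inverted to add-on-first-sight for readability):

import hashlib
import re

WHITESPACE = re.compile(r"\s+")

def content_hash(text: str) -> str:
    # Remove all whitespace before hashing so reformatted duplicates match.
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()

corpus = ["def f():\n    return 1", "def f(): return 1", "def g(): return 2"]
seen: set[str] = set()
kept = []
for doc in corpus:
    h = content_hash(doc)
    if h not in seen:  # first occurrence wins
        seen.add(h)
        kept.append(doc)

assert kept == ["def f():\n    return 1", "def g(): return 2"]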
"""simple docstring"""
__a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def A_ ( _lowercase, _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :int = [False] * len(_lowercase )
snake_case_ :int = [s]
snake_case_ :Tuple = True
while queue:
snake_case_ :int = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowercase )
snake_case_ :List[Any] = True
snake_case_ :Optional[Any] = u
return visited[t]
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = [-1] * (len(_lowercase ))
snake_case_ :Optional[int] = 0
snake_case_ :Any = []
snake_case_ :List[str] = [i[:] for i in graph] # Keep a copy of the original capacities so the cut edges can be recovered later.
while bfs(_lowercase, _lowercase, _lowercase, _lowercase ):
snake_case_ :Optional[Any] = float("""Inf""" )
snake_case_ :Tuple = sink
while s != source:
# Find the minimum value in select path
snake_case_ :Optional[int] = min(_lowercase, graph[parent[s]][s] )
snake_case_ :Dict = parent[s]
max_flow += path_flow
snake_case_ :List[str] = sink
while v != source:
snake_case_ :Optional[int] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
snake_case_ :str = parent[v]
for i in range(len(_lowercase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 66 |
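The row above is Edmonds-Karp in disguise: BFS finds a shortest augmenting path, the bottleneck capacity along it is pushed (with the reverse-edge updates building the residual graph), and once no augmenting path remains, the cut edges are those with positive capacity in the original graph but none left in the residual. A usage sketch, assuming the mincut() and test_graph defined above; note that mincut() mutates its input, so copy first:

import copy

capacities = copy.deepcopy(test_graph)            # mincut() consumes the graph in place
cut_edges = mincut(test_graph, source=0, sink=5)  # expected: [(1, 3), (4, 3), (4, 5)]
cut_capacity = sum(capacities[u][v] for u, v in cut_edges)
print(cut_capacity)                               # 12 + 7 + 4 = 23, equal to the max flow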
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Any = ort.SessionOptions()
_snake_case : Union[str, Any] = False
return options
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_snake_case : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_snake_case : Optional[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""", revision="""onnx""", safety_checker=a_, feature_extractor=a_, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Optional[Any] = """A red cat sitting on a park bench"""
_snake_case : Optional[int] = np.random.RandomState(0 )
_snake_case : Any = pipe(
prompt=a_, image=a_, mask_image=a_, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=a_, output_type="""np""", )
_snake_case : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 64 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any] , a : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.ModuleList(a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : torch.FloatTensor , a : Union[torch.Tensor, float, int] , a : torch.Tensor , a : List[torch.tensor] , a : List[float] , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[Dict[str, Any]] = None , a : bool = False , a : bool = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(a , a , self.nets ) ):
__lowerCamelCase , __lowerCamelCase = controlnet(
a , a , a , a , a , a , a , a , a , a , a , )
# merge samples
if i == 0:
__lowerCamelCase , __lowerCamelCase = down_samples, mid_sample
else:
__lowerCamelCase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a , a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Union[str, os.PathLike] , a : bool = True , a : Callable = None , a : bool = False , a : Optional[str] = None , ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a , is_main_process=a , save_function=a , safe_serialization=a , variant=a , )
idx += 1
__lowerCamelCase = model_path_to_save + f"""_{idx}"""
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , a : Optional[Union[str, os.PathLike]] , **a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__lowerCamelCase = pretrained_model_path
while os.path.isdir(a ):
__lowerCamelCase = ControlNetModel.from_pretrained(a , **a )
controlnets.append(a )
idx += 1
__lowerCamelCase = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(a )} controlnets loaded from {pretrained_model_path}.""" )
if len(a ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(a )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(a )
| 67 |
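The forward pass above runs each ControlNet on its own conditioning image and merges the outputs by element-wise summation: the first net's residuals seed the accumulators, and every later net's residuals are added on top. Reduced to the tensor arithmetic with plain torch (the function name and shapes are illustrative):

import torch

def merge_controlnet_residuals(per_net_outputs):
    # per_net_outputs: list of (down_block_res_samples, mid_block_res_sample) tuples.
    down_acc, mid_acc = per_net_outputs[0]
    for down, mid in per_net_outputs[1:]:
        down_acc = [a + b for a, b in zip(down_acc, down)]
        mid_acc = mid_acc + mid
    return down_acc, mid_acc

outputs = [
    ([torch.ones(1, 4, 8, 8)] * 3, torch.ones(1, 4, 4, 4)),
    ([torch.full((1, 4, 8, 8), 2.0)] * 3, torch.full((1, 4, 4, 4), 2.0)),
]
down, mid = merge_controlnet_residuals(outputs)
assert torch.allclose(down[0], torch.full((1, 4, 8, 8), 3.0))
assert torch.allclose(mid, torch.full((1, 4, 4, 4), 3.0))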
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
A_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
for attribute in key.split(""".""" ):
_snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
_snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape
else:
_snake_case : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_snake_case : int = value
elif weight_type == "weight_g":
_snake_case : str = value
elif weight_type == "weight_v":
_snake_case : Tuple = value
elif weight_type == "bias":
_snake_case : List[str] = value
else:
_snake_case : int = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : List[Any] = []
_snake_case : Optional[Any] = fairseq_model.state_dict()
_snake_case : str = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_snake_case : Optional[Any] = None
for name, value in fairseq_dict.items():
_snake_case : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , )
_snake_case : Dict = True
elif name.split(""".""" )[0] == "proj":
_snake_case : Dict = fairseq_model.proj
_snake_case : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_snake_case : Dict = True
if "*" in mapped_key:
_snake_case : Optional[int] = name.split(snake_case__ )[0].split(""".""" )[-2]
_snake_case : Union[str, Any] = mapped_key.replace("""*""" , snake_case__ )
if "weight_g" in name:
_snake_case : str = """weight_g"""
elif "weight_v" in name:
_snake_case : Optional[Any] = """weight_v"""
elif "bias" in name:
_snake_case : Union[str, Any] = """bias"""
elif "weight" in name:
_snake_case : int = """weight"""
else:
_snake_case : Optional[int] = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F"Unused weights: {unused_weights}" )
return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build an untied linear output layer initialized from an embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a vocab dict from a fairseq dictionary file, reserving ids 0-3 for special tokens."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
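# Example invocation (a sketch: the script name and every path below are
# placeholders, not values shipped with the original checkpoints):
#
#   python convert_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2 \
#       --dict_path /path/to/dict.ltr.txt \
#       --vocab_size 10224 --num_decoder_layers 7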
| 64 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax head with optional projections, as used by Transformer-XL."""
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
A__ = hidden[..., :-1, :].contiguous()
A__ = labels[..., 1:].contiguous()
A__ = hidden.view(-1 , hidden.size(-1 ) )
A__ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
A__ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
A__ = self._compute_logit(lowercase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
A__ = labels != -100
A__ = torch.zeros_like(lowercase , dtype=hidden.dtype , device=hidden.device )
A__ = (
-nn.functional.log_softmax(lowercase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
A__ = nn.functional.log_softmax(lowercase , dim=-1 )
else:
# construct weights and biases
A__ , A__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
A__ , A__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ = self.out_layers[0].weight[l_idx:r_idx]
A__ = self.out_layers[0].bias[l_idx:r_idx]
else:
A__ = self.out_layers[i].weight
A__ = self.out_layers[i].bias
if i == 0:
A__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
A__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowercase )
biases.append(lowercase )
A__ , A__ , A__ = weights[0], biases[0], self.out_projs[0]
A__ = self._compute_logit(lowercase , lowercase , lowercase , lowercase )
A__ = nn.functional.log_softmax(lowercase , dim=1 )
if labels is None:
A__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
A__ = torch.zeros_like(lowercase , dtype=hidden.dtype , device=hidden.device )
A__ = 0
A__ = [0] + self.cutoffs
for i in range(len(lowercase ) - 1 ):
A__ , A__ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
A__ = (labels >= l_idx) & (labels < r_idx)
A__ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
A__ = labels.index_select(0 , lowercase ) - l_idx
A__ = head_logprob.index_select(0 , lowercase )
A__ = hidden.index_select(0 , lowercase )
else:
A__ = hidden
if i == 0:
if labels is not None:
A__ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
A__ = head_logprob[:, : self.cutoffs[0]]
else:
A__ , A__ , A__ = weights[i], biases[i], self.out_projs[i]
A__ = self._compute_logit(lowercase , lowercase , lowercase , lowercase )
A__ = nn.functional.log_softmax(lowercase , dim=1 )
A__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
A__ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
A__ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
A__ = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowercase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob(self, hidden):
'''simple docstring'''
if self.n_clusters == 0:
A__ = self._compute_logit(lowercase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowercase , dim=-1 )
else:
# construct weights and biases
A__ , A__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
A__ , A__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ = self.out_layers[0].weight[l_idx:r_idx]
A__ = self.out_layers[0].bias[l_idx:r_idx]
else:
A__ = self.out_layers[i].weight
A__ = self.out_layers[i].bias
if i == 0:
A__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
A__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowercase )
biases.append(lowercase )
A__ , A__ , A__ = weights[0], biases[0], self.out_projs[0]
A__ = self._compute_logit(lowercase , lowercase , lowercase , lowercase )
A__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
A__ = nn.functional.log_softmax(lowercase , dim=1 )
A__ = [0] + self.cutoffs
for i in range(len(lowercase ) - 1 ):
A__ , A__ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
A__ = head_logprob[:, : self.cutoffs[0]]
else:
A__ , A__ , A__ = weights[i], biases[i], self.out_projs[i]
A__ = self._compute_logit(lowercase , lowercase , lowercase , lowercase )
A__ = nn.functional.log_softmax(lowercase , dim=1 )
A__ = head_logprob[:, -i] + tail_logprob_i
A__ = logprob_i
return out
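# Minimal usage sketch (shapes and cutoffs are assumptions, loosely following the
# WikiText-103 configuration this head is usually paired with):
#
#   crit = ProjectedAdaptiveLogSoftmax(n_token=267735, d_embed=1024, d_proj=1024,
#                                      cutoffs=[20000, 40000, 200000], div_val=4)
#   hidden = torch.randn(8, 32, 1024)            # [batch, seq, d_proj]
#   labels = torch.randint(0, 267735, (8, 32))   # next-token targets
#   nll = crit(hidden, labels)                   # per-token negative log-likelihood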
| 68 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
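# In sketch form, `find_executable_batch_size` retries the wrapped function with a
# halved batch size whenever a CUDA out-of-memory error escapes it (illustrative
# only; the real implementation lives in `accelerate.utils`):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def inner_loop(batch_size):
#       ...  # build dataloaders and train with `batch_size`
#   inner_loop()  # tries 128, then 64, 32, ... until training fits in memory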
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the GLUE/MRPC train and eval dataloaders for a given batch size."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
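# Typical launch commands (the file name `memory.py` is assumed):
#
#   python memory.py                 # single CPU or GPU
#   accelerate launch memory.py      # distributed / mixed precision per `accelerate config`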
| 64 | 0 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup."""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant multiplier, e.g. step_rules="1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0."""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the optimizer's initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
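# Usage sketch (the optimizer below is hypothetical):
#
#   from torch.optim import SGD
#   optimizer = SGD(model.parameters(), lr=0.1)
#   lr_scheduler = get_scheduler(
#       "piecewise_constant", optimizer, step_rules="1:10,0.1:20,0.01:30,0.005"
#   )
#   # multiplier 1.0 before step 10, 0.1 before step 20, 0.01 before step 30, then 0.005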
| 69 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed scheduled (daily) CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # (sic: the upstream helper's keyword argument is spelled `worflow_run_id`)
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and read the contents of the requested artifacts of the last daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
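# Usage sketch (the artifact name and token source are placeholders):
#
#   token = os.environ.get("GITHUB_TOKEN")
#   reports = get_last_daily_ci_reports(["ci_results"], output_dir=".", token=token)
#   for artifact, files in reports.items():
#       print(artifact, sorted(files))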
| 64 | 0 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Pretokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 70 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        raise NotImplementedError
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError
    def default_hp_space(self, trial):
        raise NotImplementedError
    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )
    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"
    @staticmethod
    def is_available():
        return is_optuna_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"
    @staticmethod
    def is_available():
        return is_ray_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"
    @staticmethod
    def is_available():
        return is_sigopt_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"
    @staticmethod
    def is_available():
        return is_wandb_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    """Return the name of the first available hyperparameter search backend."""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
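# Dispatch sketch (illustrative of how `Trainer.hyperparameter_search` consumes
# these backends; the trial count is a placeholder):
#
#   backend = HPSearchBackend(default_hp_search_backend())   # e.g. HPSearchBackend("optuna")
#   backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]()
#   backend_obj.ensure_available()
#   best_run = backend_obj.run(trainer, n_trials=20, direction="minimize")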
| 64 | 0 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
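# At the time of writing the API returns a list of quote objects shaped roughly
# like the following (upstream may change this at any time):
#
#   [{"q": "quote text", "a": "author name", "h": "<blockquote>...</blockquote>"}]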
| 71 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """Wraps a Donut image processor and a tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """Forward the `images` argument to the image processor and the `text` argument to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """Temporarily route calls to the tokenizer, e.g. for encoding labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence of XML-like tags into an ordered JSON-like structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
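# Parsing sketch: `token2json` turns the model's XML-like output tags into nested
# Python structures (the tag names below are hypothetical):
#
#   processor.token2json("<s_menu><s_name>Latte</s_name></s_menu>")
#   # -> {"menu": {"name": "Latte"}}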
| 64 | 0 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from `s` to `t` in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from `source` to `sink` using BFS augmenting paths (Edmonds-Karp)."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
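# For the sample capacity matrix above this prints 23, the maximum flow of this
# classic textbook network.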
| 72 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: Req = R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
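# Quick sanity checks: resistor_parallel([2, 2]) == 1.0 and resistor_series([3, 2, 1]) == 6.0.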
| 64 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable, standard pinhole camera."""
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a camera for a resized view; the aspect ratio must be preserved."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # `shape` has no default, so it must be forwarded here
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Create a batch of 20 cameras panning in a circle around the origin."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
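# Usage sketch: 20 cameras pan around the origin; for size=64 the ray batch has
# shape (1, 20 * 64 * 64, 2, 3), i.e. an (origin, direction) pair per pixel:
#
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays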
| 73 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) CodeGen tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
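# Decode-with-truncation sketch (the checkpoint name is real, the ids are
# illustrative; the patterns follow the class's documented example):
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
#   # generation is cut at the first matching pattern, and after a second print/def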
| 64 | 0 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowercase = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
_lowercase = get_tests_dir('''fixtures/vocab.json''')
_lowercase = get_tests_dir('''fixtures''')
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
A = WavaVecaConfig()
A = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(A_ )
processor.save_pretrained(A_ )
A = AutoProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(A_ ,os.path.join(A_ ,A_ ) )
copyfile(A_ ,os.path.join(A_ ,'vocab.json' ) )
A = AutoProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
A = WavaVecaFeatureExtractor()
A = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
A = WavaVecaProcessor(A_ ,A_ )
# save in new folder
processor.save_pretrained(A_ )
# drop `processor_class` in tokenizer
with open(os.path.join(A_ ,A_ ) ,'r' ) as f:
A = json.load(A_ )
config_dict.pop('processor_class' )
with open(os.path.join(A_ ,A_ ) ,'w' ) as f:
f.write(json.dumps(A_ ) )
A = AutoProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
A = WavaVecaFeatureExtractor()
A = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
A = WavaVecaProcessor(A_ ,A_ )
# save in new folder
processor.save_pretrained(A_ )
# drop `processor_class` in feature extractor
with open(os.path.join(A_ ,A_ ) ,'r' ) as f:
A = json.load(A_ )
config_dict.pop('processor_class' )
with open(os.path.join(A_ ,A_ ) ,'w' ) as f:
f.write(json.dumps(A_ ) )
A = AutoProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
A = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(A_ )
# copy relevant files
copyfile(A_ ,os.path.join(A_ ,'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(A_ ,A_ ) ,'w' ) as f:
f.write('{}' )
A = AutoProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A_ ):
A = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A_ ):
A = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ )
A = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
A = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
A = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
# Test we can also load the slow version
A = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ ,use_fast=A_ )
A = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ ,'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
try:
AutoConfig.register('custom' ,A_ )
AutoFeatureExtractor.register(A_ ,A_ )
AutoTokenizer.register(A_ ,slow_tokenizer_class=A_ )
AutoProcessor.register(A_ ,A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
AutoProcessor.register(A_ ,A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
A = CustomFeatureExtractor.from_pretrained(A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
A = os.path.join(A_ ,'vocab.txt' )
with open(A_ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
A = CustomTokenizer(A_ )
A = CustomProcessor(A_ ,A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(A_ )
A = AutoProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ ,A_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            special_attribute_present = False
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False
        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False
try:
AutoConfig.register('custom' ,A_ )
AutoFeatureExtractor.register(A_ ,A_ )
AutoTokenizer.register(A_ ,slow_tokenizer_class=A_ )
AutoProcessor.register(A_ ,A_ )
# If remote code is not set, the default is to use local classes.
A = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=A_ )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ ,'BertTokenizerFast' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ ,'ConvNextImageProcessor' )
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ) -> Optional[Any]:
A = TOKEN
HfFolder.save_token(A_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token ,repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-processor' )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = Wav2Vec2Processor.from_pretrained(A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(A_ ,'test-processor' ) ,push_to_hub=A_ ,use_auth_token=self._token )
A = Wav2Vec2Processor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(A_ ,getattr(new_processor.feature_extractor ,A_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = Wav2Vec2Processor.from_pretrained(A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(A_ ,'test-processor-org' ) ,push_to_hub=A_ ,use_auth_token=self._token ,organization='valid_org' ,)
A = Wav2Vec2Processor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(A_ ,getattr(new_processor.feature_extractor ,A_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A = CustomFeatureExtractor.from_pretrained(A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
A = os.path.join(A_ ,'vocab.txt' )
with open(A_ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
A = CustomTokenizer(A_ )
A = CustomProcessor(A_ ,A_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' ,token=self._token )
A = Repository(A_ ,clone_from=F'{USER}/test-dynamic-processor' ,token=self._token )
processor.save_pretrained(A_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map ,{
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} ,)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(A_ ,'tokenizer_config.json' ) ) as f:
A = json.load(A_ )
self.assertDictEqual(
tokenizer_config['auto_map'] ,{
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} ,)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(A_ ,'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(A_ ,'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(A_ ,'custom_processing.py' ) ) )
repo.push_to_hub()
A = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' ,trust_remote_code=A_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ ,'CustomProcessor' ) | 74 |
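The tests above exercise dynamic-module loading from the Hub. As a minimal standalone sketch (assuming network access, an installed transformers library, and the same internal test repository), the opt-in flow they assert looks like this:

import os
from transformers import AutoProcessor

# Executing processor code downloaded from the Hub requires an explicit opt-in;
# without trust_remote_code=True the call is refused, as the tests check.
processor = AutoProcessor.from_pretrained(
    "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
)
print(type(processor).__name__)  # "NewProcessor", as asserted above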
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosConfig , snake_case__ : bool = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: config.hidden_size, :]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[-config.hidden_size :, :]
_snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
if "backbone" in name:
_snake_case : str = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
_snake_case : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
_snake_case : str = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
_snake_case : str = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
_snake_case : Tuple = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_snake_case : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
_snake_case : str = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
_snake_case : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_snake_case : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_snake_case : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_snake_case : str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_snake_case : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_snake_case : int = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
_snake_case : Union[str, Any] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
_snake_case : str = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
_snake_case : Union[str, Any] = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosForObjectDetection ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_snake_case : List[str] = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
_snake_case : Optional[Any] = key.split(""".""" )
_snake_case : Optional[Any] = int(key_split[2] )
_snake_case : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_snake_case : str = val[:dim, :]
_snake_case : Optional[Any] = val[
dim : dim * 2, :
]
_snake_case : Optional[Any] = val[-dim:, :]
else:
_snake_case : Dict = val[:dim]
_snake_case : Any = val[dim : dim * 2]
_snake_case : Dict = val[-dim:]
else:
_snake_case : Tuple = val
return orig_state_dict
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : bool = False ):
"""simple docstring"""
_snake_case : Optional[Any] = get_yolos_config(snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )["""model"""]
# load 🤗 model
_snake_case : Optional[Any] = YolosForObjectDetection(snake_case__ )
model.eval()
_snake_case : Optional[Any] = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by YolosImageProcessor
_snake_case : List[str] = 8_00 if yolos_name != """yolos_ti""" else 5_12
_snake_case : Optional[int] = YolosImageProcessor(format="""coco_detection""" , size=snake_case__ )
_snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
_snake_case , _snake_case : Optional[int] = outputs.logits, outputs.pred_boxes
_snake_case , _snake_case : Dict = None, None
if yolos_name == "yolos_ti":
_snake_case : Optional[Any] = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
_snake_case : Tuple = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
_snake_case : List[str] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
_snake_case : List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
_snake_case : Dict = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
_snake_case : Union[str, Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
_snake_case : Optional[Any] = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
_snake_case : int = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
_snake_case : Optional[int] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
_snake_case : Dict = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
_snake_case : str = model_mapping[yolos_name]
image_processor.push_to_hub(snake_case__ , organization="""hustvl""" )
model.push_to_hub(snake_case__ , organization="""hustvl""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 64 | 0 |
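A minimal inference sketch for a checkpoint written out by the conversion script above; the local folder name is hypothetical and assumes a completed run of convert_yolos_checkpoint:

import requests
import torch
from PIL import Image
from transformers import YolosForObjectDetection, YolosImageProcessor

model = YolosForObjectDetection.from_pretrained("./yolos-small")   # hypothetical dump folder
processor = YolosImageProcessor.from_pretrained("./yolos-small")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape)  # (batch, num_detection_tokens, num_labels + 1)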
'''simple docstring'''
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 75 |
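A quick spot check of the reconstructed solution above on small, hand-verifiable inputs:

assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29
assert solution(17) == 17     # a prime is its own largest prime factor
print(solution())             # 6857, the largest prime factor of 600851475143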
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str]=False ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[str]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case : List[Any] = """"""
else:
_snake_case : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
_snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
_snake_case : Union[str, Any] = in_proj_bias[: config.hidden_size]
_snake_case : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_snake_case : List[str] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Union[str, Any] = val
def convert_vit_msn_checkpoint(checkpoint_url: str, pytorch_dump_folder_path: str) -> None:
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
_snake_case : int = ViTMSNModel(snake_case__ )
_snake_case : Optional[int] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )["""target_encoder"""]
_snake_case : List[str] = ViTImageProcessor(size=config.image_size )
remove_projection_head(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , base_model=snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
_snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Tuple = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
_snake_case : str = ViTImageProcessor(
size=config.image_size , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : Any = image_processor(images=snake_case__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
_snake_case : int = model(**snake_case__ )
_snake_case : List[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_snake_case : Optional[Any] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
_snake_case : str = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
_snake_case : Optional[int] = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
_snake_case : List[Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
_snake_case : Optional[int] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 64 | 0 |
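Once converted (or using the published Hub checkpoint, assuming network access), the model can be loaded back for feature extraction; the random tensor stands in for a preprocessed image:

import torch
from transformers import ViTMSNModel

model = ViTMSNModel.from_pretrained("facebook/vit-msn-small")
pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for processor output
with torch.no_grad():
    last_hidden_state = model(pixel_values).last_hidden_state
print(last_hidden_state.shape)  # (1, num_patches + 1, hidden_size)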
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep scheduler (iPNDM)."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep combination of the running noise predictions
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample
def __len__( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.config.num_train_timesteps | 76 |
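A minimal denoising-loop sketch for the scheduler above; the equivalent public class is imported from diffusers so the snippet runs standalone, and the zero-returning lambda stands in for a real noise-prediction network:

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 32, 32)
model = lambda x, t: torch.zeros_like(x)  # placeholder network

for t in scheduler.timesteps:
    residual = model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample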
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """If the two bit-strings differ in at most one position, return the merged
    string with that position replaced by '_'; otherwise return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def UpperCAmelCase__ (snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : int = []
while True:
_snake_case : Union[str, Any] = ["""$"""] * len(snake_case__ )
_snake_case : int = []
for i in range(len(snake_case__ ) ):
for j in range(i + 1 , len(snake_case__ ) ):
_snake_case : List[Any] = compare_string(binary[i] , binary[j] )
if k is False:
_snake_case : Dict = """*"""
_snake_case : List[Any] = """*"""
temp.append("""X""" )
for i in range(len(snake_case__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case__ ) == 0:
return pi
_snake_case : Optional[int] = list(set(snake_case__ ) )
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Sequence[float] ):
"""simple docstring"""
_snake_case : Optional[int] = []
for minterm in minterms:
_snake_case : Any = """"""
for _ in range(snake_case__ ):
_snake_case : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case__ )
return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def UpperCAmelCase__ (snake_case__ : list[list[int]] , snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : Any = []
_snake_case : Union[str, Any] = [0] * len(snake_case__ )
for i in range(len(chart[0] ) ):
_snake_case : Tuple = 0
_snake_case : str = -1
for j in range(len(snake_case__ ) ):
if chart[j][i] == 1:
count += 1
_snake_case : Union[str, Any] = j
if count == 1:
_snake_case : Union[str, Any] = 1
for i in range(len(snake_case__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case__ ) ):
_snake_case : List[Any] = 0
temp.append(prime_implicants[i] )
while True:
_snake_case : Optional[int] = 0
_snake_case : str = -1
_snake_case : Any = 0
for i in range(len(snake_case__ ) ):
_snake_case : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_snake_case : Dict = count_n
_snake_case : Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case__ ) ):
_snake_case : Optional[Any] = 0
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : int = [[0 for x in range(len(snake_case__ ) )] for x in range(len(snake_case__ ) )]
for i in range(len(snake_case__ ) ):
_snake_case : Any = prime_implicants[i].count("""_""" )
for j in range(len(snake_case__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__ ):
_snake_case : Tuple = 1
return chart
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : int = int(input("""Enter the no. of variables\n""" ) )
_snake_case : List[str] = [
    int(x)
    for x in input(
        """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_snake_case : List[str] = decimal_to_binary(snake_case__ , snake_case__ )
_snake_case : str = check(snake_case__ )
print("""Prime Implicants are:""" )
print(snake_case__ )
_snake_case : int = prime_implicant_chart(snake_case__ , snake_case__ )
_snake_case : str = selection(snake_case__ , snake_case__ )
print("""Essential Prime Implicants are:""" )
print(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 64 | 0 |
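A worked three-variable example for the pipeline above, assuming the helper functions behave as their names and the call sites in main() indicate:

binary = decimal_to_binary(3, [1, 4, 6, 7])    # ['001', '100', '110', '111']
prime_implicants = check(binary)               # ['001', '1_0', '11_']
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))      # here all three implicants are essential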
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for whichever of voltage, current, or power is given as 0, using P = V * I."""
    result = namedtuple('result' , 'name value' )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError('Only one argument must be 0' )
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system' )
    elif voltage == 0:
        return result('voltage' , power / current )
    elif current == 0:
        return result('current' , power / voltage )
    elif power == 0:
        return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
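Usage of electric_power above: exactly one of the three quantities must be passed as 0, and the function solves for it:

print(electric_power(voltage=0, current=2, power=5))      # result(name='voltage', value=2.5)
print(electric_power(voltage=2.2, current=2.2, power=0))  # result(name='power', value=4.84)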
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    """Sort arr in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Sort the first 2/3 again to fix anything the second pass disturbed
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(stooge_sort(unsorted))
| 64 | 0 |
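Stooge sort is an intentionally inefficient teaching algorithm, running in O(n^(log 3 / log 1.5)), roughly O(n^2.71), time:

print(stooge_sort([18, 5, 3, 9, 1]))  # [1, 3, 5, 9, 18]
print(stooge_sort([]))                # [] -- empty input is a no-op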
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = CLIPConfig
__UpperCamelCase = ["""CLIPEncoderLayer"""]
def __init__( self :Union[str, Any] , lowercase_ :CLIPConfig ) -> Dict:
super().__init__(lowercase_ )
UpperCAmelCase = CLIPVisionModel(config.vision_config )
UpperCAmelCase = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowercase_ )
UpperCAmelCase = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowercase_ )
UpperCAmelCase = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowercase_ )
UpperCAmelCase = nn.Parameter(torch.ones(17 ) , requires_grad=lowercase_ )
UpperCAmelCase = nn.Parameter(torch.ones(3 ) , requires_grad=lowercase_ )
@torch.no_grad()
def UpperCAmelCase__ ( self :Any , lowercase_ :Dict , lowercase_ :Union[str, Any] ) -> Optional[int]:
UpperCAmelCase = self.vision_model(lowercase_ )[1] # pooled_output
UpperCAmelCase = self.visual_projection(lowercase_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase = cosine_distance(lowercase_ , self.special_care_embeds ).cpu().float().numpy()
UpperCAmelCase = cosine_distance(lowercase_ , self.concept_embeds ).cpu().float().numpy()
UpperCAmelCase = []
UpperCAmelCase = image_embeds.shape[0]
for i in range(lowercase_ ):
UpperCAmelCase = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCAmelCase = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
UpperCAmelCase = special_cos_dist[i][concept_idx]
UpperCAmelCase = self.special_care_embeds_weights[concept_idx].item()
UpperCAmelCase = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
UpperCAmelCase = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
UpperCAmelCase = cos_dist[i][concept_idx]
UpperCAmelCase = self.concept_embeds_weights[concept_idx].item()
UpperCAmelCase = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowercase_ )
result.append(lowercase_ )
UpperCAmelCase = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self :Any , lowercase_ :torch.FloatTensor , lowercase_ :torch.FloatTensor ) -> List[Any]:
UpperCAmelCase = self.vision_model(lowercase_ )[1] # pooled_output
UpperCAmelCase = self.visual_projection(lowercase_ )
UpperCAmelCase = cosine_distance(lowercase_ , self.special_care_embeds )
UpperCAmelCase = cosine_distance(lowercase_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCAmelCase = 0.0
UpperCAmelCase = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
UpperCAmelCase = torch.any(special_scores > 0 , dim=1 )
UpperCAmelCase = special_care * 0.01
UpperCAmelCase = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
UpperCAmelCase = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
UpperCAmelCase = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 78 |
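The cosine_distance helper above is just a normalized dot product; given that definition, a tiny sanity check:

import torch

a = torch.tensor([[3.0, 0.0]])              # normalizes to [1, 0]
b = torch.tensor([[1.0, 0.0], [0.0, 2.0]])  # rows normalize to [1, 0] and [0, 1]
print(cosine_distance(a, b))                # tensor([[1., 0.]])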
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["note_seq"]
def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(self, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
| 64 | 0 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    '''Post message_body to the given Slack incoming-webhook URL.'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 79 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowercase:
'''simple docstring'''
def __init__( self: List[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : int = data
_snake_case : Dict = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def rotate(n: int, b: int) -> int:
    """Left-rotate the 32-bit integer n by b bits."""
    return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
_snake_case : Optional[int] = self.data + padding + struct.pack(""">Q""", 8 * len(self.data ) )
return padded_data
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 )
]
def UpperCamelCase_ ( self: Optional[Any], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = list(struct.unpack(""">16L""", a_ ) ) + [0] * 64
for i in range(16, 80 ):
_snake_case : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 )
return w
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.padding()
_snake_case : str = self.split_blocks()
for block in self.blocks:
_snake_case : Any = self.expand_block(a_ )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.h
for i in range(0, 80 ):
if 0 <= i < 20:
_snake_case : int = (b & c) | ((~b) & d)
_snake_case : str = 0X5a827999
elif 20 <= i < 40:
_snake_case : Optional[int] = b ^ c ^ d
_snake_case : str = 0X6ed9eba1
elif 40 <= i < 60:
_snake_case : List[Any] = (b & c) | (b & d) | (c & d)
_snake_case : List[Any] = 0X8f1bbcdc
elif 60 <= i < 80:
_snake_case : List[Any] = b ^ c ^ d
_snake_case : int = 0Xca62c1d6
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = (
self.rotate(a_, 5 ) + f + e + k + expanded_block[i] & 0Xffffffff,
a,
self.rotate(a_, 30 ),
c,
d,
)
_snake_case : Union[str, Any] = (
self.h[0] + a & 0Xffffffff,
self.h[1] + b & 0Xffffffff,
self.h[2] + c & 0Xffffffff,
self.h[3] + d & 0Xffffffff,
self.h[4] + e & 0Xffffffff,
)
return ("{:08x}" * 5).format(*self.h )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = B"""Test String"""
assert SHAaHash(snake_case__ ).final_hash() == hashlib.shaa(snake_case__ ).hexdigest() # noqa: S324
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
_snake_case : Union[str, Any] = parser.parse_args()
_snake_case : List[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
_snake_case : str = f.read()
else:
_snake_case : int = bytes(snake_case__ , """utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 64 | 0 |
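The test above cross-checks against hashlib; the same reference digests can be produced directly for any input:

import hashlib

print(hashlib.sha1(b"Test String").hexdigest())  # reference digest used by the test
print(hashlib.sha1(b"abc").hexdigest())          # a9993e364706816aba3e25717850c26c9cd0d89d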
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowercase_ ( nn.Module ):
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 0.0
__UpperCAmelCase = 1
__UpperCAmelCase = 1
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = jnp.floataa
def __a ( self ):
UpperCamelCase__ = []
UpperCamelCase__ = []
for i in range(self.num_layers ):
UpperCamelCase__ = self.in_channels if i == 0 else self.out_channels
UpperCamelCase__ = FlaxResnetBlockaD(
in_channels=a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
UpperCamelCase__ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
UpperCamelCase__ = resnets
UpperCamelCase__ = attentions
if self.add_downsample:
UpperCamelCase__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , a , a , a , a=True ):
UpperCamelCase__ = ()
for resnet, attn in zip(self.resnets , self.attentions ):
UpperCamelCase__ = resnet(a , a , deterministic=a )
UpperCamelCase__ = attn(a , a , deterministic=a )
output_states += (hidden_states,)
if self.add_downsample:
UpperCamelCase__ = self.downsamplers_a(a )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase_ ( nn.Module ):
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 0.0
__UpperCAmelCase = 1
__UpperCAmelCase = True
__UpperCAmelCase = jnp.floataa
def __a ( self ):
UpperCamelCase__ = []
for i in range(self.num_layers ):
UpperCamelCase__ = self.in_channels if i == 0 else self.out_channels
UpperCamelCase__ = FlaxResnetBlockaD(
in_channels=a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
UpperCamelCase__ = resnets
if self.add_downsample:
UpperCamelCase__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , a , a , a=True ):
UpperCamelCase__ = ()
for resnet in self.resnets:
UpperCamelCase__ = resnet(a , a , deterministic=a )
output_states += (hidden_states,)
if self.add_downsample:
UpperCamelCase__ = self.downsamplers_a(a )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase_ ( nn.Module ):
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 0.0
__UpperCAmelCase = 1
__UpperCAmelCase = 1
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = jnp.floataa
def __a ( self ):
UpperCamelCase__ = []
UpperCamelCase__ = []
for i in range(self.num_layers ):
UpperCamelCase__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCamelCase__ = self.prev_output_channel if i == 0 else self.out_channels
UpperCamelCase__ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
UpperCamelCase__ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
UpperCamelCase__ = resnets
UpperCamelCase__ = attentions
if self.add_upsample:
UpperCamelCase__ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , a , a , a , a , a=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
UpperCamelCase__ = res_hidden_states_tuple[-1]
UpperCamelCase__ = res_hidden_states_tuple[:-1]
UpperCamelCase__ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCamelCase__ = resnet(a , a , deterministic=a )
UpperCamelCase__ = attn(a , a , deterministic=a )
if self.add_upsample:
UpperCamelCase__ = self.upsamplers_a(a )
return hidden_states
class lowercase_ ( nn.Module ):
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 0.0
__UpperCAmelCase = 1
__UpperCAmelCase = True
__UpperCAmelCase = jnp.floataa
def __a ( self ):
UpperCamelCase__ = []
for i in range(self.num_layers ):
UpperCamelCase__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCamelCase__ = self.prev_output_channel if i == 0 else self.out_channels
UpperCamelCase__ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
UpperCamelCase__ = resnets
if self.add_upsample:
UpperCamelCase__ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , a , a , a , a=True ):
for resnet in self.resnets:
# pop res hidden states
UpperCamelCase__ = res_hidden_states_tuple[-1]
UpperCamelCase__ = res_hidden_states_tuple[:-1]
UpperCamelCase__ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCamelCase__ = resnet(a , a , deterministic=a )
if self.add_upsample:
UpperCamelCase__ = self.upsamplers_a(a )
return hidden_states
class lowercase_ ( nn.Module ):
__UpperCAmelCase = 42
__UpperCAmelCase = 0.0
__UpperCAmelCase = 1
__UpperCAmelCase = 1
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = jnp.floataa
def __a ( self ):
# there is always at least one resnet
UpperCamelCase__ = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
UpperCamelCase__ = []
for _ in range(self.num_layers ):
UpperCamelCase__ = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
UpperCamelCase__ = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
UpperCamelCase__ = resnets
UpperCamelCase__ = attentions
def __call__( self , a , a , a , a=True ):
UpperCamelCase__ = self.resnets[0](a , a )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
UpperCamelCase__ = attn(a , a , deterministic=a )
UpperCamelCase__ = resnet(a , a , deterministic=a )
return hidden_states
| 80 |
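A shape walk-through for the plain down block above; it mirrors diffusers' FlaxDownBlock2D, and the import path below is an assumption, since it has moved between diffusers releases:

import jax
import jax.numpy as jnp
from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D  # path may differ by version

block = FlaxDownBlock2D(in_channels=32, out_channels=64, dropout=0.0, num_layers=1)
sample = jnp.zeros((1, 16, 16, 32))  # Flax UNet blocks use NHWC layout
temb = jnp.zeros((1, 1280))
params = block.init(jax.random.PRNGKey(0), sample, temb)
hidden, skips = block.apply(params, sample, temb)
print(hidden.shape)  # (1, 8, 8, 64): channels doubled, spatial size halved by the downsampler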
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(__a )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "rag"
lowercase__ = True
def __init__( self: Union[str, Any], a_: int=None, a_: Tuple=True, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, a_: Optional[Any]=" / ", a_: Tuple=" // ", a_: List[Any]=5, a_: Dict=300, a_: Tuple=768, a_: Optional[Any]=8, a_: int="wiki_dpr", a_: Any="train", a_: Optional[int]="compressed", a_: Optional[int]=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: str=False, a_: Dict=0.0, a_: Union[str, Any]=True, a_: Union[str, Any]=False, a_: str=False, a_: List[str]=False, a_: Union[str, Any]=True, a_: Any=None, **a_: List[Any], ):
'''simple docstring'''
super().__init__(
bos_token_id=a_, pad_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, forced_eos_token_id=a_, is_encoder_decoder=a_, prefix=a_, vocab_size=a_, **a_, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_snake_case : Union[str, Any] = kwargs.pop("""question_encoder""" )
_snake_case : List[str] = question_encoder_config.pop("""model_type""" )
_snake_case : Union[str, Any] = kwargs.pop("""generator""" )
_snake_case : Any = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
_snake_case : Union[str, Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Optional[Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Any = reduce_loss
_snake_case : Optional[int] = label_smoothing
_snake_case : Dict = exclude_bos_score
_snake_case : int = do_marginalize
_snake_case : Optional[Any] = title_sep
_snake_case : Any = doc_sep
_snake_case : List[str] = n_docs
_snake_case : Tuple = max_combined_length
_snake_case : Optional[Any] = dataset
_snake_case : Union[str, Any] = dataset_split
_snake_case : Tuple = index_name
_snake_case : Any = retrieval_vector_size
_snake_case : Union[str, Any] = retrieval_batch_size
_snake_case : str = passages_path
_snake_case : Tuple = index_path
_snake_case : List[Any] = use_dummy_dataset
_snake_case : Optional[Any] = output_retrieved
_snake_case : Tuple = do_deduplication
_snake_case : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_snake_case : Dict = getattr(self.generator, """forced_eos_token_id""", a_ )
@classmethod
def UpperCamelCase_ ( cls: Any, a_: PretrainedConfig, a_: PretrainedConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : List[str] = self.question_encoder.to_dict()
_snake_case : Tuple = self.generator.to_dict()
_snake_case : Dict = self.__class__.model_type
return output
| 64 | 0 |
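The parameters documented above are usually filled in by composing two sub-configs through the classmethod defined at the end of the class (from_question_encoder_generator_configs in the original source); a minimal sketch, assuming network access for the two pretrained configs:

from transformers import AutoConfig, RagConfig

question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoConfig.from_pretrained("facebook/bart-large")
config = RagConfig.from_question_encoder_generator_configs(
    question_encoder, generator, n_docs=5, index_name="compressed"
)
print(config.n_docs, config.generator.model_type)  # 5 bart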
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_data2vec_audio"] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
_import_structure["modeling_data2vec_text"] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["modeling_data2vec_vision"] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 81 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
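
# Illustrative use of these aliases (an example added for clarity, not part of
# the original module):
#
#     def flatten(data: NestedDataStructureLike[int]) -> List[int]:
#         ...  # accepts a single int, a list of ints, or a dict of str -> int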
| 64 | 0 |
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n - 1.
    """
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
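
# Quick sanity check (illustrative example added here; not from the original
# source). The six permutations of [0, 1, 2] are indexed 0..5 in lexicographic
# order, so k = 4 picks out [2, 0, 1]:
#
#     kth_permutation(0, 3)  # -> [0, 1, 2]
#     kth_permutation(4, 3)  # -> [2, 0, 1]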
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
"""simple docstring"""
def heaps(arr: list) -> list:
    """
    Pure python implementation of Heap's algorithm: returns
    all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
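
# Quick sanity check (illustrative, added for clarity): Heap's algorithm
# visits every permutation of a 3-element list exactly once, so we expect
# 3! = 6 distinct tuples:
#
#     assert len(heaps([1, 2, 3])) == 6
#     assert len(set(heaps([1, 2, 3]))) == 6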
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 64 | 0 |
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Returns the sum of 2 * a * ((a - 1) // 2) over a = 3, ..., n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 83 |
"""simple docstring"""
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Returns the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Returns the sum of all numbers (from 3 up) equal to the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
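
# Worked check (illustrative, added for clarity): 145 = 1! + 4! + 5!
# = 1 + 24 + 120, so it is one of the numbers counted by solution() above:
#
#     sum_of_digit_factorial(145)  # -> 145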
if __name__ == "__main__":
print(F'''{solution() = }''')
| 64 | 0 |
"""simple docstring"""
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
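
# Illustrative usage (example grid added for clarity; not from the original
# snippet): the 1s below form exactly two 8-connected islands.
#
#     g = Graph(4, 5, [
#         [1, 1, 0, 0, 0],
#         [0, 1, 0, 0, 1],
#         [1, 0, 0, 1, 1],
#         [0, 0, 0, 0, 0],
#     ])
#     g.count_islands()  # -> 2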
| 84 |
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of k consecutive elements of array, via a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
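
# Deterministic example (illustrative, added for clarity): the best window of
# size 2 in [1, 9, 9, 2, 3] is [9, 9]:
#
#     max_sum_in_array([1, 9, 9, 2, 3], 2)  # -> 18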
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 64 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass
    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 85 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''yjernite/retribert-base-uncased''': 5_12,
}
A_ = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
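
# Illustrative usage (a sketch added for clarity, not from the original file):
#
#     tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#     enc = tok("what is a retriever?", "a dense passage encoder")
#     # enc["token_type_ids"] marks the first segment with 0s and the second
#     # with 1s, as produced by create_token_type_ids_from_sequences above.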
| 64 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"]) | 86 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
| 64 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1 - looking for keywords in the first few lines of the file.
    2 - counting the occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 64 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """, )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
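
# Illustrative usage (a sketch added for clarity; the checkpoint name is an
# assumption, not part of this file):
#
#     from transformers import pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#     conversation = Conversation("Can you recommend a sci-fi novel?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])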
| 88 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="np", )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 64 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')

    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""

    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
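
# Typical invocation (illustrative note added for clarity): once this parser is
# wired into the `accelerate` CLI, the command runs as
#
#     accelerate test --config_file path/to/default_config.yaml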
if __name__ == "__main__":
main()
| 89 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def UpperCAmelCase__ (snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case , _snake_case : Optional[Any] = emb.weight.shape
_snake_case : Optional[int] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
_snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
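# Illustrative usage of create_vocab_dict (a sketch, not part of the original
# script): a fairseq dictionary file lists one token per line followed by its
# count, e.g. "| 94802" or "E 51860". The file name below is an assumption.
#
#     vocab = create_vocab_dict("dict.ltr.txt")
#     vocab["<unk>"]  # -> 3, the four special tokens always occupy ids 0-3
#     vocab["|"]      # -> 4, file tokens are appended in order from id 4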
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
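# Once converted, the dump folder can be reloaded as a regular speech-to-text
# model (a sketch; the path below is an assumption):
#
#     model = SpeechEncoderDecoderModel.from_pretrained("path/to/dump")
#     tokenizer = SpeechaTextaTokenizer.from_pretrained("path/to/dump")
#
# The encoder consumes raw waveforms via the saved feature extractor and the
# decoder generates transcriptions with the vocabulary written above.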
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 64 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 90 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
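# The key utility showcased below is `find_executable_batch_size`: it calls
# the decorated function and, whenever a CUDA out-of-memory error escapes,
# halves the batch size and retries. A minimal standalone sketch of the
# pattern (illustrative, separate from the training script that follows):
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run the loop
#
#     train()  # called with no arguments; the decorator injects batch_size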
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 64 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename the basic JAX keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
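# Quick illustration of the renaming rules above (made-up key tuples and
# shapes, not taken from a real checkpoint):
#     ("mlp", "wi", "kernel") + 2-D tensor -> ("mlp", "wi", "weight"), tensor transposed
#     ("expert", "kernel")    + 3-D tensor -> ("expert", "weight"), axes permuted (0, 2, 1)
#     ("layer_norm", "scale")              -> ("layer_norm", "weight"), tensor unchanged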
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
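# A note on the shard bookkeeping above: dtype_byte_size turns weight_size into
# a plain byte count, e.g. a float32 tensor of shape (32128, 768) contributes
# 32128 * 768 * 4 ≈ 98.7 MB (shape chosen purely for illustration), so roughly
# one hundred such tensors fit into a "10GB" shard before a new file is started.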
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 91 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) matches the parameter name in get_ci_error_statistics
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
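# Illustrative usage (a sketch; the artifact name and token source are
# assumptions, not fixed by this module):
#
#     reports = get_last_daily_ci_reports(
#         artifact_names=["run_models_gpu_test_reports"],
#         output_dir="ci_reports",
#         token=os.environ.get("GITHUB_TOKEN"),
#     )
#     # reports maps artifact name -> {filename -> decoded text content}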
| 64 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 92 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
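# Typical use from a Trainer (a sketch; `model_init` and the search settings
# are assumptions, not part of this module):
#
#     backend = default_hp_search_backend()  # e.g. "optuna" if it is installed
#     trainer = Trainer(model_init=model_init, args=training_args, ...)
#     best_run = trainer.hyperparameter_search(backend=backend, n_trials=10, direction="minimize")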
| 64 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_lowercase : List[Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
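# check_models_equal compares flattened parameter trees leaf by leaf with an
# absolute tolerance of 1e-4. A quick standalone sketch (config values chosen
# for illustration): a save/reload round trip should always pass this check.
#
#     model = FlaxBertModel(BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5,
#                                      num_attention_heads=4, intermediate_size=37))
#     with tempfile.TemporaryDirectory() as tmp_dir:
#         model.save_pretrained(tmp_dir)
#         reloaded = FlaxBertModel.from_pretrained(tmp_dir)
#     assert check_models_equal(model, reloaded)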
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 93 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        # forwards everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwards everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """
        Convert a (generated) token sequence into an ordered JSON format.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 64 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 94 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Req = R1 + R2 + ... + Rn"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
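    # Worked example (values chosen for illustration): 2, 4 and 4 ohm
    # resistors in parallel give 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm, while in
    # series they simply add up to 10.0 ohm.
    print(resistor_parallel([2, 4, 4]))  # 1.0
    print(resistor_series([2, 4, 4]))  # 10.0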
| 64 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] =MaskaFormerModelTester(self )
a__ : Optional[Any] =ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ , a__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCAmelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _lowercase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _lowercase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
pass
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Dict =model_class(lowerCAmelCase__ )
a__ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Dict =[*signature.parameters.keys()]
a__ : List[str] =["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
a__ : Optional[int] =MaskaFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : List[str] =(self.model_tester.min_size,) * 2
a__ : Any ={
"pixel_values": torch.randn((2, 3, *size) , device=lowerCAmelCase__ ),
"mask_labels": torch.randn((2, 1_0, *size) , device=lowerCAmelCase__ ),
"class_labels": torch.zeros(2 , 1_0 , device=lowerCAmelCase__ ).long(),
}
a__ : Any =self.model_tester.get_config()
a__ : str =MaskaFormerForUniversalSegmentation(lowerCAmelCase__ ).to(lowerCAmelCase__ )
a__ : Any =model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ , a__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[Any] =model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
a__ : Optional[int] =model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
if not self.model_tester.is_training:
return
a__ : Optional[Any] =self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
a__ : str =model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
a__ : List[str] =model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss
loss.backward()
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Dict =self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ : Dict =self.model_tester.prepare_config_and_inputs()
a__ : Tuple =True
a__ : int =True
a__ : List[Any] =model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
model.train()
a__ : List[str] =model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
a__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a__ : Optional[Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
a__ : Tuple =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a__ : Optional[Any] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
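# Absolute tolerance used by the integration tests below when comparing model outputs against reference slices.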
UpperCAmelCase : int = 1E-4
def _A ( ):
"""simple docstring"""
a__ : int =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase):
@cached_property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[int] =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ )
a__ : Tuple =self.default_image_processor
a__ : Tuple =prepare_img()
a__ : int =image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
a__ : List[Any] =inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
a__ : Tuple =model(**lowerCAmelCase__ )
a__ : Union[str, Any] =torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
a__ : List[Any] =torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
a__ : str =torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Optional[int] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval()
a__ : List[str] =self.default_image_processor
a__ : Optional[int] =prepare_img()
a__ : Optional[int] =image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
a__ : Union[str, Any] =inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
a__ : Tuple =model(**lowerCAmelCase__ )
# masks_queries_logits
a__ : Tuple =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
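# The mask logits are predicted at 1/4 of the padded input resolution, hence the // 4 in the expected shape above.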
a__ : int =[
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
a__ : List[str] =torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
a__ : str =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
a__ : Any =torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval()
a__ : Union[str, Any] =self.default_image_processor
a__ : Dict =image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
a__ : int =inputs["pixel_values"].to(lowerCAmelCase__ )
a__ : str =[el.to(lowerCAmelCase__ ) for el in inputs["mask_labels"]]
a__ : Any =[el.to(lowerCAmelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
a__ : Tuple =model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 95 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = CodeGenTokenizer
def __init__( self: Union[str, Any], a_: List[Any]=None, a_: str=None, a_: str=None, a_: Dict="<|endoftext|>", a_: Tuple="<|endoftext|>", a_: str="<|endoftext|>", a_: List[Any]=False, **a_: List[str], ):
'''simple docstring'''
super().__init__(
a_, a_, tokenizer_file=a_, unk_token=a_, bos_token=a_, eos_token=a_, add_prefix_space=a_, **a_, )
if kwargs.pop("""add_bos_token""", a_ ):
_snake_case : str = kwargs.pop("""name_or_path""", """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
_snake_case : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space:
_snake_case : Dict = getattr(a_, pre_tok_state.pop("""type""" ) )
_snake_case : Dict = add_prefix_space
_snake_case : str = pre_tok_class(**a_ )
_snake_case : List[Any] = add_prefix_space
def UpperCamelCase_ ( self: Any, *a_: Any, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Optional[Any], *a_: Any, **a_: List[str] ):
'''simple docstring'''
_snake_case : Dict = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : List[Any] = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
def UpperCamelCase_ ( self: str, a_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], a_: bool = False, a_: bool = None, a_: Optional[List[str]] = None, **a_: List[str], ):
'''simple docstring'''
_snake_case : Any = super().decode(
token_ids=a_, skip_special_tokens=a_, clean_up_tokenization_spaces=a_, **a_, )
if truncate_before_pattern is not None and len(a_ ) > 0:
_snake_case : List[str] = self.truncate(a_, a_ )
return decoded_text
def UpperCamelCase_ ( self: Dict, a_: Tuple, a_: Optional[Any] ):
'''simple docstring'''
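# Heuristic truncation for generated code: cut the completion at the earliest match of any
# user-supplied pattern, and also stop before a second top-level `print` or `def` so that
# only a single generated function/statement block is kept.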
def find_re(a_: Dict, a_: str, a_: Union[str, Any] ):
_snake_case : Any = pattern.search(a_, a_ )
return m.start() if m else -1
_snake_case : Tuple = [re.compile(a_, re.MULTILINE ) for pattern in truncate_before_pattern]
_snake_case : List[Any] = list(re.finditer("""^print""", a_, re.MULTILINE ) )
if len(a_ ) > 1:
_snake_case : int = completion[: prints[1].start()]
_snake_case : List[str] = list(re.finditer("""^def""", a_, re.MULTILINE ) )
if len(a_ ) > 1:
_snake_case : List[Any] = completion[: defs[1].start()]
_snake_case : int = 0
_snake_case : List[Any] = [
pos for pos in [find_re(a_, a_, a_ ) for terminal in terminals] if pos != -1
]
if len(a_ ) > 0:
return completion[: min(a_ )]
else:
return completion
| 64 | 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase , ):
super().__init__()
_lowerCamelCase : Any = value_function
_lowerCamelCase : str = unet
_lowerCamelCase : Union[str, Any] = scheduler
_lowerCamelCase : Optional[Any] = env
_lowerCamelCase : Tuple = env.get_dataset()
_lowerCamelCase : str = {}
for key in self.data.keys():
try:
_lowerCamelCase : List[str] = self.data[key].mean()
except: # noqa: E722
pass
_lowerCamelCase : List[Any] = {}
for key in self.data.keys():
try:
_lowerCamelCase : List[str] = self.data[key].std()
except: # noqa: E722
pass
_lowerCamelCase : int = env.observation_space.shape[0]
_lowerCamelCase : str = env.action_space.shape[0]
def A_ ( self , lowercase , lowercase ):
return (x_in - self.means[key]) / self.stds[key]
def A_ ( self , lowercase , lowercase ):
return x_in * self.stds[key] + self.means[key]
def A_ ( self , lowercase ):
if type(lowercase ) is dict:
return {k: self.to_torch(lowercase ) for k, v in x_in.items()}
elif torch.is_tensor(lowercase ):
return x_in.to(self.unet.device )
return torch.tensor(lowercase , device=self.unet.device )
def A_ ( self , lowercase , lowercase , lowercase ):
for key, val in cond.items():
_lowerCamelCase : Dict = val.clone()
return x_in
def A_ ( self , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = x.shape[0]
_lowerCamelCase : Any = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
_lowerCamelCase : Tuple = torch.full((batch_size,) , lowercase , device=self.unet.device , dtype=torch.long )
for _ in range(lowercase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_lowerCamelCase : str = self.value_function(x.permute(0 , 2 , 1 ) , lowercase ).sample
_lowerCamelCase : Optional[int] = torch.autograd.grad([y.sum()] , [x] )[0]
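# Classifier-guidance-style update: the value gradient is scaled by the posterior std of the
# current timestep and added to x, nudging sampling toward high-value trajectories.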
_lowerCamelCase : Optional[int] = self.scheduler._get_variance(lowercase )
_lowerCamelCase : List[Any] = torch.exp(0.5 * posterior_variance )
_lowerCamelCase : Optional[Any] = model_std * grad
_lowerCamelCase : Tuple = 0
_lowerCamelCase : int = x.detach()
_lowerCamelCase : List[str] = x + scale * grad
_lowerCamelCase : Tuple = self.reset_xa(lowercase , lowercase , self.action_dim )
_lowerCamelCase : Tuple = self.unet(x.permute(0 , 2 , 1 ) , lowercase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
_lowerCamelCase : List[str] = self.scheduler.step(lowercase , lowercase , lowercase , predict_epsilon=lowercase )['prev_sample']
# apply conditions to the trajectory (set the initial state)
_lowerCamelCase : Any = self.reset_xa(lowercase , lowercase , self.action_dim )
_lowerCamelCase : Union[str, Any] = self.to_torch(lowercase )
return x, y
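# The planner below normalizes the current observation, samples noisy trajectories, denoises them
# under value guidance via run_diffusion, and returns the first action of the highest-value trajectory.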
def __call__( self , lowercase , lowercase=64 , lowercase=32 , lowercase=2 , lowercase=0.1 ):
# normalize the observations and create batch dimension
_lowerCamelCase : List[str] = self.normalize(lowercase , 'observations' )
_lowerCamelCase : Dict = obs[None].repeat(lowercase , axis=0 )
_lowerCamelCase : Dict = {0: self.to_torch(lowercase )}
_lowerCamelCase : Any = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_lowerCamelCase : Tuple = randn_tensor(lowercase , device=self.unet.device )
_lowerCamelCase : int = self.reset_xa(lowercase , lowercase , self.action_dim )
_lowerCamelCase : Dict = self.to_torch(lowercase )
# run the diffusion process
_lowerCamelCase, _lowerCamelCase : Any = self.run_diffusion(lowercase , lowercase , lowercase , lowercase )
# sort output trajectories by value
_lowerCamelCase : List[Any] = y.argsort(0 , descending=lowercase ).squeeze()
_lowerCamelCase : Union[str, Any] = x[sorted_idx]
_lowerCamelCase : Optional[int] = sorted_values[:, :, : self.action_dim]
_lowerCamelCase : int = actions.detach().cpu().numpy()
_lowerCamelCase : Union[str, Any] = self.de_normalize(lowercase , key='actions' )
# select the action with the highest value
if y is not None:
_lowerCamelCase : int = 0
else:
# if we didn't run value guiding, select a random action
_lowerCamelCase : List[str] = np.random.randint(0 , lowercase )
_lowerCamelCase : Union[str, Any] = denorm_actions[selected_index, 0]
return denorm_actions | 96 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_snake_case : Tuple = 1_92
_snake_case : Any = 7_68
_snake_case : Any = 12
_snake_case : List[Any] = 3
_snake_case : int = [8_00, 13_33]
_snake_case : Tuple = False
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = 3_30
_snake_case : List[str] = 14
_snake_case : List[str] = 6
_snake_case : Union[str, Any] = 13_20
elif "yolos_s" in yolos_name:
_snake_case : Union[str, Any] = 3_84
_snake_case : List[str] = 15_36
_snake_case : Any = 12
_snake_case : Optional[int] = 6
elif "yolos_b" in yolos_name:
_snake_case : Dict = [8_00, 13_44]
_snake_case : str = 91
_snake_case : Optional[Any] = """huggingface/label-files"""
_snake_case : str = """coco-detection-id2label.json"""
_snake_case : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : List[str] = idalabel
_snake_case : List[str] = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosConfig , snake_case__ : bool = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: config.hidden_size, :]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[-config.hidden_size :, :]
_snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
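# For hidden size H the fused qkv matrix has 3H rows: rows [0, H) hold the query projection,
# [H, 2H) the key projection, and [2H, 3H) the value projection, matching the slices above.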
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
if "backbone" in name:
_snake_case : str = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
_snake_case : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
_snake_case : str = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
_snake_case : str = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
_snake_case : Tuple = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_snake_case : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
_snake_case : str = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
_snake_case : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_snake_case : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_snake_case : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_snake_case : str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_snake_case : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_snake_case : int = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
_snake_case : Union[str, Any] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
_snake_case : str = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
_snake_case : Union[str, Any] = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosForObjectDetection ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_snake_case : List[str] = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
_snake_case : Optional[Any] = key.split(""".""" )
_snake_case : Optional[Any] = int(key_split[2] )
_snake_case : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_snake_case : str = val[:dim, :]
_snake_case : Optional[Any] = val[
dim : dim * 2, :
]
_snake_case : Optional[Any] = val[-dim:, :]
else:
_snake_case : Dict = val[:dim]
_snake_case : Any = val[dim : dim * 2]
_snake_case : Dict = val[-dim:]
else:
_snake_case : Tuple = val
return orig_state_dict
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : bool = False ):
"""simple docstring"""
_snake_case : Optional[Any] = get_yolos_config(snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )["""model"""]
# load 🤗 model
_snake_case : Optional[Any] = YolosForObjectDetection(snake_case__ )
model.eval()
_snake_case : Optional[Any] = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by YolosImageProcessor
_snake_case : List[str] = 8_00 if yolos_name != """yolos_ti""" else 5_12
_snake_case : Optional[int] = YolosImageProcessor(format="""coco_detection""" , size=snake_case__ )
_snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
_snake_case , _snake_case : Optional[int] = outputs.logits, outputs.pred_boxes
_snake_case , _snake_case : Dict = None, None
if yolos_name == "yolos_ti":
_snake_case : Optional[Any] = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
_snake_case : Tuple = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
_snake_case : List[str] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
_snake_case : List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
_snake_case : Dict = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
_snake_case : Union[str, Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
_snake_case : Optional[Any] = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
_snake_case : int = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
_snake_case : Optional[int] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
_snake_case : Dict = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
_snake_case : str = model_mapping[yolos_name]
image_processor.push_to_hub(snake_case__ , organization="""hustvl""" )
model.push_to_hub(snake_case__ , organization="""hustvl""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 64 | 0 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
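# Issues carrying any of these labels are exempt from being auto-closed or marked stale below.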
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : i.created_at , reverse=__a )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str]=False ):
"""simple docstring"""
_snake_case : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[str]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case : List[Any] = """"""
else:
_snake_case : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
_snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
_snake_case : Union[str, Any] = in_proj_bias[: config.hidden_size]
_snake_case : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_snake_case : List[str] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
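# The `module.fc.*` keys above belong to MSN's self-supervised projection head, which is only
# used during pre-training and is therefore stripped before loading the ViT backbone.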
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = dct.pop(snake_case__ )
_snake_case : Union[str, Any] = val
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
_snake_case : str = ViTMSNConfig()
_snake_case : Any = 10_00
_snake_case : Tuple = """datasets/huggingface/label-files"""
_snake_case : Dict = """imagenet-1k-id2label.json"""
_snake_case : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ ) , """r""" ) )
_snake_case : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : List[Any] = idalabel
_snake_case : str = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
_snake_case : Tuple = 3_84
_snake_case : Dict = 15_36
_snake_case : Tuple = 6
elif "l16" in checkpoint_url:
_snake_case : Any = 10_24
_snake_case : int = 40_96
_snake_case : str = 24
_snake_case : Optional[int] = 16
_snake_case : List[Any] = 0.1
elif "b4" in checkpoint_url:
_snake_case : Tuple = 4
elif "l7" in checkpoint_url:
_snake_case : int = 7
_snake_case : Dict = 10_24
_snake_case : Optional[Any] = 40_96
_snake_case : Any = 24
_snake_case : Union[str, Any] = 16
_snake_case : Optional[int] = 0.1
_snake_case : int = ViTMSNModel(snake_case__ )
_snake_case : Optional[int] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )["""target_encoder"""]
_snake_case : List[str] = ViTImageProcessor(size=config.image_size )
remove_projection_head(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , base_model=snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
_snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Tuple = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
_snake_case : str = ViTImageProcessor(
size=config.image_size , image_mean=snake_case__ , image_std=snake_case__ )
_snake_case : Any = image_processor(images=snake_case__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
_snake_case : int = model(**snake_case__ )
_snake_case : List[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_snake_case : Optional[Any] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
_snake_case : str = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
_snake_case : Optional[int] = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
_snake_case : List[Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
_snake_case : Optional[int] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case__ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 64 | 0 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def a_ ( lowerCamelCase ):
return input_array.reshape((input_array.size, 1) )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = np.nan
for i in range(lowerCamelCase ):
UpperCAmelCase__ = features[:, labels == i]
UpperCAmelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCAmelCase__ = data - column_reshape(lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase__ = np.dot(lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = features.mean(1 )
UpperCAmelCase__ = np.nan
for i in range(lowerCamelCase ):
UpperCAmelCase__ = features[:, labels == i]
UpperCAmelCase__ = data.shape[1]
UpperCAmelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(lowerCamelCase ) - column_reshape(lowerCamelCase ) , (column_reshape(lowerCamelCase ) - column_reshape(lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase__ = device_data * np.dot(
column_reshape(lowerCamelCase ) - column_reshape(lowerCamelCase ) , (column_reshape(lowerCamelCase ) - column_reshape(lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def a_ ( lowerCamelCase , lowerCamelCase ):
# Check if the features have been loaded
if features.any():
UpperCAmelCase__ = features.mean(1 )
# Center the dataset
UpperCAmelCase__ = features - np.reshape(lowerCamelCase , (data_mean.size, 1) )
UpperCAmelCase__ = np.dot(lowerCamelCase , centered_data.T ) / features.shape[1]
UpperCAmelCase__ , UpperCAmelCase__ = np.linalg.eigh(lowerCamelCase )
# Take all the columns in reverse order (-1), then keep only the first `dimensions` columns
UpperCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCAmelCase__ = np.dot(filtered_eigenvectors.T , lowerCamelCase )
logging.info('Principal Component Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase )
logging.error('Dataset empty' )
raise AssertionError
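# Hypothetical usage sketch: for a (features x samples) matrix, e.g. shape (3, N),
# principal_component_analysis(features, 2) centers the data, eigendecomposes its covariance
# matrix, and returns the projection onto the top-2 eigenvectors, i.e. a (2, N) array.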
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
assert classes > dimensions
# Check if the features have already been loaded
if features.any():
UpperCAmelCase__ , UpperCAmelCase__ = eigh(
covariance_between_classes(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , covariance_within_classes(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , )
UpperCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = np.linalg.svd(lowerCamelCase )
UpperCAmelCase__ = svd_matrix[:, 0:dimensions]
UpperCAmelCase__ = np.dot(filtered_svd_matrix.T , lowerCamelCase )
logging.info('Linear Discriminant Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase )
logging.error('Dataset empty' )
raise AssertionError
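# LDA solves the generalized eigenproblem S_b v = lambda * S_w v (between-class vs. within-class
# covariance) via scipy's eigh, then projects the data onto the leading discriminant directions.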
def a_ ( ):
# Create dummy dataset with 2 classes and 3 features
UpperCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCAmelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCAmelCase__ = 2
UpperCAmelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(lowerCamelCase ) as error_info:
UpperCAmelCase__ = linear_discriminant_analysis(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if isinstance(lowerCamelCase , np.ndarray ):
raise AssertionError(
'Did not raise AssertionError for dimensions > classes' )
assert error_info.type is AssertionError
def a_ ( ):
UpperCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCAmelCase__ = 2
UpperCAmelCase__ = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(lowerCamelCase ) as error_info:
UpperCAmelCase__ = principal_component_analysis(lowerCamelCase , lowerCamelCase )
if not np.allclose(lowerCamelCase , lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[Any] = list(snake_case__ )
_snake_case : List[Any] = list(snake_case__ )
_snake_case : List[Any] = 0
for i in range(len(snake_case__ ) ):
if lista[i] != lista[i]:
count += 1
_snake_case : Any = """_"""
if count > 1:
return False
else:
return "".join(snake_case__ )
def UpperCAmelCase__ (snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : int = []
while True:
_snake_case : Union[str, Any] = ["""$"""] * len(snake_case__ )
_snake_case : int = []
for i in range(len(snake_case__ ) ):
for j in range(i + 1 , len(snake_case__ ) ):
_snake_case : List[Any] = compare_string(binary[i] , binary[j] )
if k is False:
_snake_case : Dict = """*"""
_snake_case : List[Any] = """*"""
temp.append("""X""" )
for i in range(len(snake_case__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case__ ) == 0:
return pi
_snake_case : Optional[int] = list(set(snake_case__ ) )
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Sequence[float] ):
"""simple docstring"""
_snake_case : Optional[int] = []
for minterm in minterms:
_snake_case : Any = """"""
for _ in range(snake_case__ ):
_snake_case : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case__ )
return temp
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = list(snake_case__ )
_snake_case : List[str] = list(snake_case__ )
_snake_case : Tuple = 0
for i in range(len(snake_case__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCAmelCase__ (snake_case__ : list[list[int]] , snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : Any = []
_snake_case : Union[str, Any] = [0] * len(snake_case__ )
for i in range(len(chart[0] ) ):
_snake_case : Tuple = 0
_snake_case : str = -1
for j in range(len(snake_case__ ) ):
if chart[j][i] == 1:
count += 1
_snake_case : Union[str, Any] = j
if count == 1:
_snake_case : Union[str, Any] = 1
for i in range(len(snake_case__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case__ ) ):
_snake_case : List[Any] = 0
temp.append(prime_implicants[i] )
while True:
_snake_case : Optional[int] = 0
_snake_case : str = -1
_snake_case : Any = 0
for i in range(len(snake_case__ ) ):
_snake_case : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_snake_case : Dict = count_n
_snake_case : Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case__ ) ):
_snake_case : Optional[Any] = 0
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : list[str] ):
"""simple docstring"""
_snake_case : int = [[0 for x in range(len(snake_case__ ) )] for x in range(len(snake_case__ ) )]
for i in range(len(snake_case__ ) ):
_snake_case : Any = prime_implicants[i].count("""_""" )
for j in range(len(snake_case__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__ ):
_snake_case : Tuple = 1
return chart
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : int = int(input("""Enter the no. of variables\n""" ) )
_snake_case : List[str] = [
float(snake_case__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_snake_case : List[str] = decimal_to_binary(snake_case__ , snake_case__ )
_snake_case : str = check(snake_case__ )
print("""Prime Implicants are:""" )
print(snake_case__ )
_snake_case : int = prime_implicant_chart(snake_case__ , snake_case__ )
_snake_case : str = selection(snake_case__ , snake_case__ )
print("""Essential Prime Implicants are:""" )
print(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 64 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase : Optional[Any] = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
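# Maps T5X/Flax parameter names to their Hugging Face counterparts; applied by plain substring
# replacement in rename_keys below.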
def A_ ( A__ ) -> Any:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
a__ : Dict = list(s_dict.keys() )
for key in keys:
a__ : Dict = R'.*/layers_(\d+)'
a__ : Tuple = key
if re.match(A__ , A__ ):
a__ : Dict = re.sub(R'layers_(\d+)' , R'block/\1/layer' , A__ )
a__ : Tuple = R'(encoder|decoder)\/'
if re.match(A__ , A__ ):
a__ : List[str] = re.match(A__ , A__ ).groups()
if groups[0] == "encoder":
a__ : Any = re.sub(R'/mlp/' , R'/1/mlp/' , A__ )
a__ : str = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , A__ )
elif groups[0] == "decoder":
a__ : Any = re.sub(R'/mlp/' , R'/2/mlp/' , A__ )
a__ : str = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , A__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
a__ : Union[str, Any] = new_key.replace(A__ , A__ )
print(F'{key} -> {new_key}' )
a__ : str = s_dict.pop(A__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
a__ : Optional[int] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
a__ : List[Any] = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
a__ : Tuple = s_dict[key].shape[0]
a__ : Optional[Any] = s_dict[key]
for idx in range(A__ ):
a__ : List[str] = expert_weihts[idx]
print(F'{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}' )
s_dict.pop(A__ )
return s_dict
lowercase : Any = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def A_ ( A__ , A__ ) -> int:
# Convert a google style config to the Hugging Face format
import regex as re
with open(A__ , 'r' ) as f:
a__ : Union[str, Any] = f.read()
a__ : Any = re.findall(R'(.*) = ([0-9.]*)' , A__ )
a__ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
a__ : Any = float(A__ ) if '.' in value else int(A__ )
a__ : List[str] = re.findall(R'(.*activations) = \(\'(.*)\',\)' , A__ )[0]
a__ : str = str(activation[1] )
a__ : List[str] = num_experts
a__ : Optional[Any] = SwitchTransformersConfig(**A__ )
return config
def A_ ( A__ , A__ , A__=None , A__="./" , A__=8 ) -> Tuple:
# Initialise PyTorch model
print(F'Loading flax weights from : {flax_checkpoint_path}' )
a__ : Union[str, Any] = checkpoints.load_tax_checkpoint(A__ )
if gin_file is not None:
a__ : Union[str, Any] = convert_gin_to_config(A__ , A__ )
else:
a__ : Union[str, Any] = SwitchTransformersConfig.from_pretrained(A__ )
a__ : int = SwitchTransformersForConditionalGeneration(A__ )
a__ : Any = flax_params['target']
a__ : Any = flatten_dict(A__ , sep='/' )
a__ : List[str] = rename_keys(A__ )
a__ : Optional[Any] = unflatten_dict(A__ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(A__ , A__ )
print(F'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(A__ )
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
lowercase : Optional[Any] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 99 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Union[str, Any] ):
"""simple docstring"""
stooge(snake_case__ , 0 , len(snake_case__ ) - 1 )
return arr
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
_snake_case , _snake_case : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_snake_case : Dict = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(snake_case__ , i + t , (snake_case__) )
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
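# Stooge sort recursively sorts the first 2/3, the last 2/3, and the first 2/3 again, giving
# O(n^(log 3 / log 1.5)) ~ O(n^2.71) time; e.g. stooge_sort([8, 3, 5, 1]) -> [1, 3, 5, 8].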
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
| 64 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=1_0 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_0 , lowerCAmelCase__=0.02 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ):
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_frames
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = attention_type
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = num_labels
# in TimeSformer, the total number of tokens equals num_frames * num_patches per frame + 1 CLS token
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = (num_frames) * self.num_patches_per_frame + 1
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels)
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__SCREAMING_SNAKE_CASE = self.num_labels
return config
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = TimesformerModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = TimesformerForVideoClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__)
# verify the logits shape
__SCREAMING_SNAKE_CASE = torch.Size((self.batch_size, self.num_labels))
self.parent.assertEqual(result.logits.shape , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __a , __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__lowercase : Tuple = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Dict = False
__lowercase : Optional[Any] = False
__lowercase : Union[str, Any] = False
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TimesformerModelTester(self)
__SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False):
__SCREAMING_SNAKE_CASE = copy.deepcopy(lowerCAmelCase__)
if return_labels:
if model_class in get_values(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__)
return inputs_dict
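# For classification model classes, attach dummy all-zero labels so the common tests can also exercise the loss path.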
def snake_case_ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""")
def snake_case_ ( self):
pass
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__)
@slow
def snake_case_ ( self):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TimesformerModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def snake_case_ ( self):
if not self.has_attentions:
pass
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = self.model_tester.seq_length
__SCREAMING_SNAKE_CASE = self.model_tester.num_frames
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase__)
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
self.assertEqual(out_len + 1 , len(lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def snake_case_ ( self):
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.hidden_states
__SCREAMING_SNAKE_CASE = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__SCREAMING_SNAKE_CASE = np.load(UpperCamelCase_ )
return list(UpperCamelCase_ )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case_ ( self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""").to(
lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(video[:8] , return_tensors="""pt""").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__)
# verify the logits
__SCREAMING_SNAKE_CASE = torch.Size((1, 4_0_0))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.tensor([-0.30_16, -0.77_13, -0.42_05]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
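# A hedged end-to-end sketch mirroring the slow test above: classify a clip with
# the Kinetics-400 checkpoint. The zero-filled "video" is a placeholder for the
# 8 numpy frames prepare_video() returns; everything else follows the test.
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor
processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
video = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)]  # placeholder frames
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 400), as asserted above
print(model.config.id2label[int(logits.argmax(-1))])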
| 100 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["note_seq"]
def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(self, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
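# A standalone sketch of the dummy-object pattern above: the module imports
# cheaply, and the missing-backend error only fires when the object is used.
# The helper below is a minimal stand-in for transformers' requires_backends,
# and DummyNoteSeqPipeline is a hypothetical consumer of the pattern.
import importlib.util
def _requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    for backend in backends:
        if importlib.util.find_spec(backend) is None:
            raise ImportError(f"{name} requires the {backend} library, which is not installed.")
class DummyNoteSeqPipeline:
    def __init__(self, *args, **kwargs):
        _requires_backends(self, ["note_seq"])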
| 64 | 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
lowercase__ :List[str] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
lowercase__ :Any = "sshleifer/student_marian_en_ro_6_1"
lowercase__ :Optional[Any] = "sshleifer/tiny-mbart"
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def A__ ( self ,A__=False ,A__=None ,A__=True ,A__=True ,A__=True ,A__=True ,):
lowercase = self.run_trainer(
eval_steps=1 ,max_len=1_2 ,model_name=A__ ,num_train_epochs=1 ,distributed=A__ ,extra_args_str=A__ ,predict_with_generate=A__ ,do_train=A__ ,do_eval=A__ ,do_predict=A__ ,)
lowercase = TrainerState.load_from_json(os.path.join(A__ ,'''trainer_state.json''')).log_history
if not do_eval:
return
lowercase = [log for log in logs if '''eval_loss''' in log.keys()]
lowercase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
lowercase = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] ,A__)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A__ ( self):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A__ ( self):
self.run_seqaseq_quick(distributed=A__)
@require_torch_multi_gpu
def A__ ( self):
self.run_seqaseq_quick(distributed=A__)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def A__ ( self):
self.run_seqaseq_quick(distributed=A__ ,extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def A__ ( self):
self.run_seqaseq_quick(distributed=A__ ,extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def A__ ( self):
self.run_seqaseq_quick(distributed=A__ ,extra_args_str='''--sharded_ddp zero_dp_2''' ,predict_with_generate=A__)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def A__ ( self):
self.run_seqaseq_quick(
distributed=A__ ,extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' ,predict_with_generate=A__)
@require_apex
@require_torch_gpu
def A__ ( self):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via
# a 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A__ ,extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A__ ,extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def A__ ( self ,A__):
# each sub-test is slow-ish, so this is split into multiple sub-tests to avoid CI timeout
lowercase = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
lowercase = experiments[experiment_id]
lowercase = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
lowercase = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A__ ,extra_args_str=data['''extra_args_str'''])
lowercase = len(re.findall(A__ ,cl.err))
self.assertEqual(A__ ,data['''n_matches'''])
@slow
def A__ ( self):
lowercase = self.run_trainer(
eval_steps=2 ,max_len=1_2_8 ,model_name=A__ ,learning_rate=3E-4 ,num_train_epochs=1_0 ,distributed=A__ ,)
# Check metrics
lowercase = TrainerState.load_from_json(os.path.join(A__ ,'''trainer_state.json''')).log_history
lowercase = [log for log in logs if '''eval_loss''' in log.keys()]
lowercase = eval_metrics[0]
lowercase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] ,A__)
# test if do_predict saves generations and metrics
lowercase = os.listdir(A__)
lowercase = {os.path.basename(p) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A__ ( self):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A__) -> Tuple[int, float]:
lowercase = '''--skip_memory_metrics 0'''
lowercase = self.run_trainer(
max_len=1_2_8 ,model_name=A__ ,learning_rate=3E-4 ,num_train_epochs=1 ,optim=A__ ,distributed=A__ ,extra_args_str=A__ ,do_eval=A__ ,do_predict=A__ ,n_gpus_to_use=1 ,)
# Check metrics
lowercase = TrainerState.load_from_json(Path(A__ ,'''trainer_state.json''')).log_history
lowercase = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**2_0)
lowercase = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**2_0)
lowercase = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
lowercase , lowercase , lowercase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
lowercase , lowercase , lowercase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
lowercase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
lowercase = gpu_peak_mem_orig + gpu_alloc_mem_orig
lowercase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
lowercase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights that
# don't get quantized and remain in fp32. Therefore only the remaining 25M parameters are quantized
# to 2 bytes each, and the difference in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
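# Worked form of the arithmetic above (a sketch; 25M is the approximate count
# of quantized parameters stated in the comment, not an exact number):
# saving_bytes = 25e6 * (8 - 2)   # Adam keeps 8 bytes/param, 8-bit BNB ~2
# saving_mib = saving_bytes / 2**20   # ~143 MiB, i.e. the "~150MB" above,
# hence the conservative 120MB threshold below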
lowercase = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A__ ,A__ ,'''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
f' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,)
self.assertGreater(
A__ ,A__ ,'''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
f' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,)
self.assertEqual(
A__ ,A__ ,f'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}')
def A__ ( self ,A__ ,A__ ,A__ ,A__ = 3E-3 ,A__ = "adafactor" ,A__ = False ,A__ = None ,A__ = 0 ,A__ = True ,A__ = True ,A__ = True ,A__ = True ,A__ = None ,):
lowercase = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
lowercase = self.get_auto_remove_tmp_dir()
lowercase = f'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
lowercase = f'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps)}\n '.split()
lowercase = '''
--do_predict
'''.split()
lowercase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
lowercase = get_gpu_count()
lowercase = get_torch_dist_unique_port()
lowercase = f'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
lowercase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A__ ,env=self.get_env())
else:
lowercase = ['''run_translation.py'''] + args
with patch.object(A__ ,'''argv''' ,A__):
main()
return output_dir
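# A minimal standalone sketch of the launch pattern run_trainer uses when
# distributed=True: wrap the training script in `torch.distributed.run` and
# execute it as a subprocess. The script path and arguments are placeholders.
import subprocess
import sys
def launch_distributed(script: str, n_gpus: int, port: int, script_args: list) -> None:
    cmd = [
        sys.executable,
        "-m", "torch.distributed.run",
        f"--nproc_per_node={n_gpus}",
        f"--master_port={port}",
        script,
        *script_args,
    ]
    subprocess.run(cmd, check=True)  # the test suite uses an async wrapper instead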
| 101 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
'''simple docstring'''
def __init__( self: List[Any], a_: List[str] ):
'''simple docstring'''
_snake_case : int = data
_snake_case : Dict = [0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0]
@staticmethod
def rotate( n: int, b: int ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0Xffffffff
def padding( self: List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
_snake_case : Optional[int] = self.data + padding + struct.pack(""">Q""", 8 * len(self.data ) )
return padded_data
def split_blocks( self: Union[str, Any] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 )
]
def expand_block( self: Optional[Any], a_: List[Any] ):
'''simple docstring'''
_snake_case : List[str] = list(struct.unpack(""">16L""", a_ ) ) + [0] * 64
for i in range(16, 80 ):
_snake_case : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 )
return w
def final_hash( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.padding()
_snake_case : str = self.split_blocks()
for block in self.blocks:
_snake_case : Any = self.expand_block(block )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.h
for i in range(0, 80 ):
if 0 <= i < 20:
_snake_case : int = (b & c) | ((~b) & d)
_snake_case : str = 0X5a827999
elif 20 <= i < 40:
_snake_case : Optional[int] = b ^ c ^ d
_snake_case : str = 0X6ed9eba1
elif 40 <= i < 60:
_snake_case : List[Any] = (b & c) | (b & d) | (c & d)
_snake_case : List[Any] = 0X8f1bbcdc
elif 60 <= i < 80:
_snake_case : List[Any] = b ^ c ^ d
_snake_case : int = 0Xca62c1d6
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = (
self.rotate(a, 5 ) + f + e + k + expanded_block[i] & 0Xffffffff,
a,
self.rotate(b, 30 ),
c,
d,
)
_snake_case : Union[str, Any] = (
self.h[0] + a & 0Xffffffff,
self.h[1] + b & 0Xffffffff,
self.h[2] + c & 0Xffffffff,
self.h[3] + d & 0Xffffffff,
self.h[4] + e & 0Xffffffff,
)
return ("{:08x}" * 5).format(*self.h )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = B"""Test String"""
assert SHAaHash(snake_case__ ).final_hash() == hashlib.sha1(snake_case__ ).hexdigest() # noqa: S324
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[Any] = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
_snake_case : Union[str, Any] = parser.parse_args()
_snake_case : List[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
_snake_case : str = f.read()
else:
_snake_case : int = bytes(snake_case__ , """utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
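# A usage sketch for the class above. Note this assumes the obfuscated
# assignment targets are restored to the attribute names the methods read
# (self.data, self.h, self.padded_data, self.blocks); with those in place,
# the result matches hashlib's SHA-1 digest:
# from hashlib import sha1
# message = b"The quick brown fox jumps over the lazy dog"
# assert SHAaHash(message).final_hash() == sha1(message).hexdigest()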
| 64 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
SCREAMING_SNAKE_CASE : Optional[int] = 5_0003
SCREAMING_SNAKE_CASE : str = 5_0002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =PLBartTokenizer
lowerCamelCase__ =None
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case : List[Any] = PLBartTokenizer(a_ , language_codes='''base''' , keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = PLBartTokenizer(a_ , language_codes='''base''' , keep_accents=a_ )
__snake_case : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__snake_case : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case : Dict = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case : List[str] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__snake_case : List[Any] = tokenizer.vocab_size
__snake_case : List[Any] = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
self.assertListEqual(a_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
__snake_case : int = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__snake_case : List[str] = tokenizer(a_ ).input_ids
self.assertEqual(
tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) , a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = PLBartTokenizer(a_ , language_codes='''multi''' , keep_accents=a_ )
__snake_case : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__snake_case : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case : Any = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__snake_case : List[Any] = tokenizer.vocab_size
__snake_case : List[Any] = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
self.assertListEqual(
a_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
__snake_case : Dict = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__snake_case : int = tokenizer(a_ ).input_ids
self.assertEqual(
tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) , a_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ ='uclanlp/plbart-python-en_XX'
lowerCamelCase__ =[
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
lowerCamelCase__ =[
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
lowerCamelCase__ =[
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def SCREAMING_SNAKE_CASE (cls ):
'''simple docstring'''
__snake_case : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
__snake_case : Optional[int] = 1
return cls
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_00_03 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.assertIn(a_ , self.tokenizer.all_special_ids )
__snake_case : Optional[int] = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
__snake_case : Tuple = self.tokenizer.decode(a_ , skip_special_tokens=a_ )
__snake_case : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a_ )
self.assertEqual(a_ , a_ )
self.assertNotIn(self.tokenizer.eos_token , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , a_ )
__snake_case : int = 10
__snake_case : Optional[Any] = self.tokenizer(a_ , max_length=a_ , truncation=a_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a_ )
self.assertEqual(len(a_ ) , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_00_04, 5_00_01] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = tempfile.mkdtemp()
__snake_case : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a_ )
__snake_case : List[str] = PLBartTokenizer.from_pretrained(a_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a_ )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a_ , return_tensors='''pt''' )
__snake_case : Optional[int] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a_ , truncation=a_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__snake_case : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(a_ , a_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
__snake_case : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.tokenizer(self.src_text , padding=a_ , truncation=a_ , max_length=3 , return_tensors='''pt''' )
__snake_case : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=a_ , truncation=a_ , max_length=10 , return_tensors='''pt''' )
__snake_case : Optional[int] = targets['''input_ids''']
__snake_case : Optional[Any] = shift_tokens_right(a_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(a_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_50, 2_42, 2, 5_00_03]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_00_01,
} , )
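# A hedged usage sketch for the tokenizer exercised above: build code-to-docstring
# translation inputs with the checkpoint and language codes from the test class.
from transformers import PLBartTokenizer
tok = PLBartTokenizer.from_pretrained(
    "uclanlp/plbart-python-en_XX", language_codes="base", src_lang="python", tgt_lang="en_XX"
)
batch = tok(
    ["def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"],
    text_target=["Returns the maximum value of a b c."],
    padding=True,
    return_tensors="pt",
)
# input_ids end with [eos, __python__] and labels end with [eos, __en_XX__],
# matching the prefix/suffix token assertions above.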
| 102 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
 Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
 by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
 set to `False` if used while training with a distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(__a )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "rag"
lowercase__ = True
def __init__( self: Union[str, Any], a_: int=None, a_: Tuple=True, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, a_: Optional[Any]=" / ", a_: Tuple=" // ", a_: List[Any]=5, a_: Dict=300, a_: Tuple=768, a_: Optional[Any]=8, a_: int="wiki_dpr", a_: Any="train", a_: Optional[int]="compressed", a_: Optional[int]=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: str=False, a_: Dict=0.0, a_: Union[str, Any]=True, a_: Union[str, Any]=False, a_: str=False, a_: List[str]=False, a_: Union[str, Any]=True, a_: Any=None, **a_: List[Any], ):
'''simple docstring'''
super().__init__(
bos_token_id=a_, pad_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, forced_eos_token_id=a_, is_encoder_decoder=a_, prefix=a_, vocab_size=a_, **a_, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_snake_case : Union[str, Any] = kwargs.pop("""question_encoder""" )
_snake_case : List[str] = question_encoder_config.pop("""model_type""" )
_snake_case : Union[str, Any] = kwargs.pop("""generator""" )
_snake_case : Any = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
_snake_case : Union[str, Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Optional[Any] = AutoConfig.for_model(a_, **a_ )
_snake_case : Any = reduce_loss
_snake_case : Optional[int] = label_smoothing
_snake_case : Dict = exclude_bos_score
_snake_case : int = do_marginalize
_snake_case : Optional[Any] = title_sep
_snake_case : Any = doc_sep
_snake_case : List[str] = n_docs
_snake_case : Tuple = max_combined_length
_snake_case : Optional[Any] = dataset
_snake_case : Union[str, Any] = dataset_split
_snake_case : Tuple = index_name
_snake_case : Any = retrieval_vector_size
_snake_case : Union[str, Any] = retrieval_batch_size
_snake_case : str = passages_path
_snake_case : Tuple = index_path
_snake_case : List[Any] = use_dummy_dataset
_snake_case : Optional[Any] = output_retrieved
_snake_case : Tuple = do_deduplication
_snake_case : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_snake_case : Dict = getattr(self.generator, """forced_eos_token_id""", a_ )
@classmethod
def UpperCamelCase_ ( cls: Any, a_: PretrainedConfig, a_: PretrainedConfig, **a_: Optional[Any] ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : List[str] = self.question_encoder.to_dict()
_snake_case : Tuple = self.generator.to_dict()
_snake_case : Dict = self.__class__.model_type
return output
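# A hedged construction sketch for the composite config above: RAG requires the
# sub-configs as dicts that carry a `model_type` key, which `to_dict()` provides.
# DPR + BART is the usual pairing, but any question-encoder/generator pair works.
from transformers import BartConfig, DPRConfig, RagConfig
rag_config = RagConfig(
    question_encoder=DPRConfig().to_dict(),
    generator=BartConfig().to_dict(),
    n_docs=5,
)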
| 64 | 0 |
def UpperCamelCase( __UpperCamelCase : int = 1000 ):
return sum(e for e in range(3 ,__UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'''{solution() = }''')
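# The brute-force sum above has a closed form by inclusion-exclusion: add the
# multiples of 3 and of 5, subtract the double-counted multiples of 15, each via
# the arithmetic-series formula. A standalone sketch:
def sum_multiples_below(k: int, limit: int) -> int:
    n = (limit - 1) // k  # number of positive multiples of k below limit
    return k * n * (n + 1) // 2
def solution_closed_form(limit: int = 1000) -> int:
    return (
        sum_multiples_below(3, limit)
        + sum_multiples_below(5, limit)
        - sum_multiples_below(15, limit)
    )
assert solution_closed_form() == 233168  # agrees with the loop for the default limit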
| 103 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
A_ = TypeVar('''T''')
A_ = Union[List[T], Tuple[T, ...]]
A_ = Union[T, List[T], Dict[str, T]]
A_ = Union[str, bytes, os.PathLike]
| 64 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
lowerCAmelCase__ = {'''bert_for_seq_generation''': 512}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[int] = []
SCREAMING_SNAKE_CASE : Any = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : Tuple="<s>" ,lowercase__ : Union[str, Any]="</s>" ,lowercase__ : str="<unk>" ,lowercase__ : Tuple="<pad>" ,lowercase__ : Union[str, Any]="<::::>" ,lowercase__ : Optional[Dict[str, Any]] = None ,**lowercase__ : Any ,):
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,pad_token=lowercase__ ,sep_token=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase__ ,)
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : Optional[int] ,lowercase__ : Optional[Any] ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : str ):
return self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Union[str, Any] ):
return self.sp_model.piece_to_id(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ):
__lowercase = self.sp_model.IdToPiece(lowercase__ )
return token
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = []
__lowercase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase__ ) + token
__lowercase = []
else:
current_sub_tokens.append(lowercase__ )
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
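# A minimal sketch of the SentencePiece round trip the tokenizer above wraps,
# using sentencepiece directly; "spiece.model" is a placeholder path to a
# trained model file such as the fixture referenced elsewhere in this repo.
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # hypothetical model path
pieces = sp.encode("Hello world", out_type=str)  # e.g. ['▁Hello', '▁world']
text = sp.decode(pieces)  # back to "Hello world"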
| 104 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list ):
"""simple docstring"""
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
_snake_case : List[Any] = []
def generate(snake_case__ : int , snake_case__ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_snake_case , _snake_case : Optional[Any] = arr[k - 1], arr[i]
else: # k is odd
_snake_case , _snake_case : List[str] = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
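# A standalone check of Heap's algorithm as implemented above (readable names,
# same swap rule: even k swaps arr[i]<->arr[k-1], odd k swaps arr[0]<->arr[k-1]):
def heap_permutations(arr: list) -> list:
    res = []
    def generate(k: int) -> None:
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1)
    generate(len(arr))
    return res
assert len(heap_permutations([1, 2, 3])) == 6  # 3! permutations
assert len(set(heap_permutations([1, 2, 3, 4]))) == 24  # all 4! are distinct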
| 64 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : list ) ->int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
a : Union[str, Any] = grid[0]
for row_n in range(1 , len(_lowercase ) ):
a : Optional[Any] = grid[row_n]
a : Tuple = fill_row(_lowercase , _lowercase )
a : List[Any] = grid[row_n]
return grid[-1][-1]
def _SCREAMING_SNAKE_CASE ( _lowercase : list , _lowercase : list ) ->list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(_lowercase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
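# A standalone sketch of the row-by-row DP above with readable names: each cell
# accumulates the cheaper of its left and upper neighbours, so the bottom-right
# cell ends up holding the minimum path sum.
def min_path_sum(grid: list) -> int:
    for c in range(1, len(grid[0])):  # first row: only reachable from the left
        grid[0][c] += grid[0][c - 1]
    for r in range(1, len(grid)):
        grid[r][0] += grid[r - 1][0]  # first column: only reachable from above
        for c in range(1, len(grid[r])):
            grid[r][c] += min(grid[r][c - 1], grid[r - 1][c])
    return grid[-1][-1]
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1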
| 105 |
"""simple docstring"""
from math import factorial
A_ = {str(d): factorial(d) for d in range(10)}
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(snake_case__ ) )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[str] = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , snake_case__ ) if sum_of_digit_factorial(snake_case__ ) == i )
if __name__ == "__main__":
print(F'''{solution() = }''')
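# Sanity check for the digit-factorial search above (Project Euler 34): the only
# numbers equal to the sum of the factorials of their digits are 145 and 40585,
# so solution() returns 145 + 40585 = 40730.
from math import factorial
digit_fact = {str(d): factorial(d) for d in range(10)}
assert sum(digit_fact[d] for d in "145") == 145  # 1! + 4! + 5! = 1 + 24 + 120
assert sum(digit_fact[d] for d in "40585") == 40585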
| 64 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase_ ,'''width_multiplier''' ) )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple ,lowercase_ : Dict ,lowercase_ : List[Any]=1_3 ,lowercase_ : Tuple=6_4 ,lowercase_ : Optional[int]=2 ,lowercase_ : Any=3 ,lowercase_ : List[Any]="swish" ,lowercase_ : Optional[Any]=3 ,lowercase_ : Dict=3_2 ,lowercase_ : Union[str, Any]=0.1 ,lowercase_ : int=0.02 ,lowercase_ : List[Any]=True ,lowercase_ : Union[str, Any]=True ,lowercase_ : Optional[int]=1_0 ,lowercase_ : List[str]=None ,lowercase_ : List[Any]=0.25 ,lowercase_ : List[Any]=0.0 ,lowercase_ : Dict=0.0 ,):
lowerCAmelCase__ : Tuple = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : int = patch_size
lowerCAmelCase__ : Optional[Any] = num_channels
lowerCAmelCase__ : List[str] = make_divisible(5_1_2 * width_multiplier ,divisor=8 )
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : List[Any] = conv_kernel_size
lowerCAmelCase__ : Tuple = output_stride
lowerCAmelCase__ : Union[str, Any] = classifier_dropout_prob
lowerCAmelCase__ : Optional[Any] = use_labels
lowerCAmelCase__ : List[Any] = is_training
lowerCAmelCase__ : List[Any] = num_labels
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : Tuple = scope
lowerCAmelCase__ : int = width_multiplier
lowerCAmelCase__ : Any = ffn_dropout
lowerCAmelCase__ : Tuple = attn_dropout
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] ,self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
lowerCAmelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCAmelCase ( self : Tuple ):
return MobileViTVaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,)
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Dict ,lowercase_ : List[str] ,lowercase_ : Dict ,lowercase_ : Union[str, Any] ):
lowerCAmelCase__ : int = MobileViTVaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : int = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def __lowerCAmelCase ( self : Dict ,lowercase_ : List[str] ,lowercase_ : Tuple ,lowercase_ : Optional[Any] ,lowercase_ : List[Any] ):
lowerCAmelCase__ : Dict = self.num_labels
lowerCAmelCase__ : int = MobileViTVaForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Any ,lowercase_ : int ,lowercase_ : Tuple ,lowercase_ : Optional[Any] ):
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : int = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
lowerCAmelCase__ : Tuple = model(lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = config_and_inputs
lowerCAmelCase__ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Optional[int] = MobileViTVaModelTester(self )
lowerCAmelCase__ : Optional[int] = MobileViTVaConfigTester(self ,config_class=lowercase_ ,has_text_modality=lowercase_ )
def __lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __lowerCAmelCase ( self : Dict ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __lowerCAmelCase ( self : Dict ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : Any ):
pass
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = model_class(lowercase_ )
lowerCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __lowerCAmelCase ( self : int ):
def check_hidden_states_output(lowercase_ : int ,lowercase_ : List[Any] ,lowercase_ : Any ):
lowerCAmelCase__ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(**self._prepare_for_class(lowercase_ ,lowercase_ ) )
lowerCAmelCase__ : str = outputs.hidden_states
lowerCAmelCase__ : List[Any] = 5
self.assertEqual(len(lowercase_ ) ,lowercase_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCAmelCase__ : Optional[Any] = 2
for i in range(len(lowercase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = True
check_hidden_states_output(lowercase_ ,lowercase_ ,lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Any = True
check_hidden_states_output(lowercase_ ,lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Any = MobileViTVaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : Optional[Any] ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : int = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
lowercase_ )
lowerCAmelCase__ : Optional[int] = self.default_image_processor
lowerCAmelCase__ : Optional[Any] = prepare_img()
lowerCAmelCase__ : int = image_processor(images=lowercase_ ,return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**lowercase_ )
# verify the logits
lowerCAmelCase__ : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape ,lowercase_ )
lowerCAmelCase__ : List[Any] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase_ ,atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ : int = model.to(lowercase_ )
lowerCAmelCase__ : int = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ : int = prepare_img()
lowerCAmelCase__ : int = image_processor(images=lowercase_ ,return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**lowercase_ )
lowerCAmelCase__ : List[Any] = outputs.logits
# verify the logits
lowerCAmelCase__ : str = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape ,lowercase_ )
lowerCAmelCase__ : str = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] ,device=lowercase_ ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,lowercase_ ,atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ : int = model.to(lowercase_ )
lowerCAmelCase__ : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ : Optional[Any] = prepare_img()
lowerCAmelCase__ : str = image_processor(images=lowercase_ ,return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**lowercase_ )
lowerCAmelCase__ : Union[str, Any] = outputs.logits.detach().cpu()
lowerCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=lowercase_ ,target_sizes=[(5_0, 6_0)] )
lowerCAmelCase__ : Optional[int] = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape ,lowercase_ )
lowerCAmelCase__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=lowercase_ )
lowerCAmelCase__ : int = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape ,lowercase_ )
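# A hedged inference sketch matching the segmentation tests above, written with
# the upstream class name MobileViTV2ForSemanticSegmentation (which the
# obfuscated test imports as MobileViTVaForSemanticSegmentation):
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForSemanticSegmentation
ckpt = "shehan97/mobilevitv2-1.0-voc-deeplabv3"
processor = MobileViTImageProcessor.from_pretrained(ckpt)
model = MobileViTV2ForSemanticSegmentation.from_pretrained(ckpt)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # logits shape (1, 21, 32, 32), as asserted above
seg_maps = processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])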
| 106 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : int ):
"""simple docstring"""
if len(snake_case__ ) < k or k < 0:
raise ValueError("""Invalid Input""" )
_snake_case : Optional[int] = sum(array[:k] )
for i in range(len(snake_case__ ) - k ):
_snake_case : Optional[Any] = current_sum - array[i] + array[i + k]
_snake_case : List[str] = max(snake_case__ , snake_case__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
A_ = [randint(-10_00, 10_00) for i in range(1_00)]
A_ = randint(0, 1_10)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
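# A standalone sketch of the sliding-window recurrence above (readable names):
# drop the element leaving the window, add the one entering it, track the best.
def max_k_sum(array: list, k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    window = sum(array[:k])
    best = window
    for i in range(len(array) - k):
        window = window - array[i] + array[i + k]
        best = max(best, window)
    return best
assert max_k_sum([1, 2, 3, 4, 5], 2) == 9  # 4 + 5
assert max_k_sum([5, -1, -1, 5], 2) == 4   # both edge windows sum to 4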
| 64 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowerCAmelCase : str = 'CompVis/stable-diffusion-v1-1'
__lowerCAmelCase : int = 'CompVis/stable-diffusion-v1-2'
__lowerCAmelCase : List[str] = 'CompVis/stable-diffusion-v1-3'
__lowerCAmelCase : str = 'CompVis/stable-diffusion-v1-4'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : CLIPTextModel , __lowerCamelCase : CLIPTokenizer , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCamelCase : StableDiffusionSafetyChecker , __lowerCamelCase : CLIPImageProcessor , __lowerCamelCase : bool = True , ) -> List[Any]:
        super().__init__()
a = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
a = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
a = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
a = StableDiffusionPipeline(
vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , requires_safety_checker=__lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Dict[str, Any]:
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
    def enable_attention_slicing(self , slice_size: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing(self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : str , ) -> Union[str, Any]:
return self.pipea(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : Any , ) -> Tuple:
return self.pipea(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : int , ) -> List[str]:
return self.pipea(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : Tuple , ) -> str:
return self.pipea(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : List[Any] , ) -> List[str]:
a = "cuda" if torch.cuda.is_available() else "cpu"
self.to(__lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
a = self.textaimg_sda_a(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
a = self.textaimg_sda_a(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
a = self.textaimg_sda_a(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
a = self.textaimg_sda_a(
prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
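# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): how a checkpoint-comparison
# pipeline like the class above is typically loaded. `custom_pipeline` is a
# real `DiffusionPipeline.from_pretrained` argument; the community-pipeline
# name "stable_diffusion_comparison" and the output layout are assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="stable_diffusion_comparison",  # assumed pipeline name
    )
    result = pipe(prompt="an astronaut riding a horse", num_inference_steps=50)
    images = result.images  # assumed layout: one image per v1.x checkpoint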
| 107 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''yjernite/retribert-base-uncased''': 5_12,
}
A_ = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = RetriBertTokenizer
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self: int, a_: int=None, a_: Dict=None, a_: Any=True, a_: int="[UNK]", a_: Any="[SEP]", a_: List[Any]="[PAD]", a_: List[Any]="[CLS]", a_: str="[MASK]", a_: Dict=True, a_: Optional[int]=None, **a_: Tuple, ):
'''simple docstring'''
super().__init__(
a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""", do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case
def UpperCamelCase_ ( self: Any, a_: str, a_: Optional[int]=None ):
'''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self: List[str], a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
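    # Editor's note: a worked example of the two helpers above (in the upstream
    # source they are `build_inputs_with_special_tokens` and
    # `create_token_type_ids_from_sequences`) for a BERT-style sentence pair:
    #
    #   tokens:   [CLS] a1 a2 [SEP] b1 b2 [SEP]
    #   type ids:   0   0  0    0    1  1    1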
| 64 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Optional[str] =field(
default=lowercase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowercase )} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Optional[str] =field(
default=lowercase , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] =field(
default=lowercase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool =field(
default=lowercase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool =field(
default=lowercase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool =field(default=lowercase , metadata={"help": "Whether ot not to use whole word mask."} )
a : float =field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float =field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int =field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int =field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool =field(
default=lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def get_dataset(
    args: DataTrainingArguments ,
    tokenizer: PreTrainedTokenizer ,
    evaluate: bool = False ,
    cache_dir: Optional[str] = None ,
):
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask" )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
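# Editor's note on the dataset choice above: LineByLineTextDataset treats every
# input line as an independent example (useful when documents are one per line),
# while TextDataset concatenates the whole file and slices it into contiguous
# block_size chunks; LineByLineWithRefDataset additionally carries a reference
# file with Chinese whole-word-mask boundaries.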
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info("Training new model from scratch" )
        model = AutoModelWithLMHead.from_config(config )

    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
if config.model_type == "xlnet":
lowerCAmelCase : int = DataCollatorForPermutationLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCAmelCase : Any = DataCollatorForWholeWordMask(
tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
else:
lowerCAmelCase : List[str] = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase : Any = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , prediction_loss_only=SCREAMING_SNAKE_CASE , )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"] )
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
        if trainer.is_world_master():
            with open(output_eval_file , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key in sorted(result.keys() ):
                    logger.info("  %s = %s" , key , str(result[key] ) )
                    writer.write("%s = %s\n" % (key, str(result[key] )) )

        results.update(result )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 108 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = CodeGenTokenizer
lowercase__ = CodeGenTokenizerFast
lowercase__ = True
lowercase__ = {"add_prefix_space": True}
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_snake_case : List[Any] = {"""unk_token""": """<unk>"""}
_snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def UpperCamelCase_ ( self: Any, **a_: int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Any, **a_: str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = """lower newer"""
_snake_case : Tuple = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
_snake_case : Optional[Any] = """lower newer"""
_snake_case : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_snake_case : int = tokenizer.tokenize(a_, add_prefix_space=a_ )
self.assertListEqual(a_, a_ )
_snake_case : str = tokens + [tokenizer.unk_token]
_snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : int = self.get_tokenizer()
_snake_case : int = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : Dict = """lower newer"""
# Testing tokenization
_snake_case : Dict = tokenizer.tokenize(a_, add_prefix_space=a_ )
_snake_case : List[str] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids without special tokens
_snake_case : Optional[Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : Tuple = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids with special tokens
_snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : int = tokenizer.encode(a_, add_prefix_space=a_ )
_snake_case : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
# Testing the unknown token
_snake_case : Tuple = tokens + [rust_tokenizer.unk_token]
_snake_case : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Dict, *a_: Dict, **a_: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int, a_: List[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
# Simple input
_snake_case : Any = """This is a simple input"""
_snake_case : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""")
_snake_case : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
# Pair input
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" )
# Simple input
_snake_case : List[Any] = """This is a simple input"""
_snake_case : int = ["""This is a simple input looooooooong""", """This is a simple input"""]
_snake_case : Any = ("""This is a simple input""", """This is a pair""")
_snake_case : str = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_snake_case : str = tokenizer.pad_token_id
_snake_case : Optional[int] = tokenizer(a_, padding="""max_length""", max_length=30, return_tensors="""np""" )
_snake_case : Dict = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
_snake_case : Tuple = tokenizer(*a_, padding="""max_length""", max_length=60, return_tensors="""np""" )
_snake_case : Optional[Any] = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = """$$$"""
_snake_case : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=a_, add_bos_token=a_ )
_snake_case : str = """This is a simple input"""
_snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Union[str, Any] = tokenizer.bos_token_id
_snake_case : Tuple = tokenizer(a_ )
_snake_case : Optional[Any] = tokenizer(a_ )
self.assertEqual(out_s.input_ids[0], a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_snake_case : Optional[int] = tokenizer.decode(out_s.input_ids )
_snake_case : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_snake_case : Dict = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_snake_case : Union[str, Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_snake_case : Optional[Any] = tokenizer.encode(a_ )
_snake_case : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_snake_case : Optional[Any] = tokenizer.decode(a_, truncate_before_pattern=a_ )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
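    # Editor's sketch (not part of the test file) of the behaviour the slow test
    # above exercises — `truncate_before_pattern` cuts the decoded completion at
    # the first regex match:
    #
    #   tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    #   ids = tok.encode("def add(a, b):\n    return a + b\n\n\n# next")
    #   tok.decode(ids, truncate_before_pattern=[r"\n\n\n", r"^#"])
    #   # -> "def add(a, b):\n    return a + b"
    #
    # i.e. decoding stops at the first triple blank line or comment-only line.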
| 64 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str ):
    """Mark the function with the key code so it can be dispatched by the handler."""

    def decorator(func ):
        handle = getattr(func, """handle_key""", [] )
        handle += [key]
        setattr(func, """handle_key""", handle )
        return func

    return decorator


def mark_multiple(*keys: List[str] ):
    """Mark the function with several key codes so it can be dispatched by the handler."""

    def decorator(func ):
        handle = getattr(func, """handle_key""", [] )
        handle += keys
        setattr(func, """handle_key""", handle )
        return func

    return decorator


class KeyHandler(type ):
    def __new__(cls, name, bases, attrs ):
        new_cls = super().__new__(cls, name, bases, attrs )
        if not hasattr(new_cls, """key_handler""" ):
            setattr(new_cls, """key_handler""", {} )
        setattr(new_cls, """handle_input""", KeyHandler.handle_input )

        for value in attrs.values():
            handled_keys = getattr(value, """handle_key""", [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls ):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None


def register(cls ):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
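# Editor's sketch (not part of the original module): how `mark`, `mark_multiple`
# and `register` combine. Key codes are plain `ord` values because
# `handle_input` dispatches on `ord(char)`; the menu itself is illustrative.
if __name__ == "__main__":

    @register
    class DemoMenu:
        @mark(ord("\r" ) )  # Enter
        def select(cls ):
            return "selected"

        @mark_multiple(ord("j" ) , ord("k" ) )  # one handler for several keys
        def move(cls ):
            return "moved"

    # DemoMenu.handle_input() blocks for one keypress and returns the matching
    # handler's result, or None for an unhandled key.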
| 109 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'''\s+''')


def get_hash(example ):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}


def line_stats(example ):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}


def alpha_stats(example ):
    """Calculates the alphanumeric character fraction of the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["""content"""]] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
    """Check if current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["""hash"""] )
        return True
    else:
        return False
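# Editor's note: an equivalent first-occurrence formulation of the exact
# deduplication implemented above (illustrative only; the real script seeds
# `uniques` with ds.unique("hash") and filters with check_uniques):
def _first_occurrence_mask(hashes ):
    """Return a keep/drop mask that keeps only the first example per hash."""
    seen = set()
    mask = []
    for h in hashes:
        mask.append(h not in seen )
        seen.add(h )
    return mask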
def is_autogenerated(example , scan_width=5 ):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    lines = example["""content"""].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    """Check if file is a configuration or test file, via keywords and 'config'/'test' frequency."""
    keywords = ["""unit tests""", """test file""", """configuration file"""]
    lines = example["""content"""].splitlines()
    count_config = 0
    count_test = 0
    # first test: keywords in the first few lines
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: frequency of 'config' and 'test' relative to file length
    nlines = example["""content"""].count("""\n""" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("""config""" )
        count_test += line.lower().count("""test""" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example ):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["""def """, """class """, """for """, """while """]
    lines = example["""content"""].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example , minimum=4 ):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["""content"""].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("""=""" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example ):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    ratio = len(example["""content"""] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example ):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results


def filter(example , uniques , args ):
    """Filter dataset with heuristics."""
    if not check_uniques(example , uniques ):
        return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path ):
    """Compress a file with g-zip."""
    with open(file_path , """rb""" ) as f_in:
        with gzip.open(str(file_path ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 64 | 0 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str ):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )

    def match_in_pattern(self, char: str ) -> int:
        """Finds the rightmost index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1 ):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int ) -> int:
        """
        Finds the index in the text of the rightmost mismatched character when the
        pattern is aligned at ``current_pos``, or -1 if the window matches.
        """
        for i in range(self.patLen - 1, -1, -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self ) -> list[int]:
        """Searches the pattern in the text and returns all match positions."""
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = '''ABAABA'''
pattern = '''AB'''

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
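# Editor's trace of the demo above (text="ABAABA", pattern="AB"):
#   i=0: window "AB" matches                       -> record 0
#   i=1: mismatch at text[2]="A"; the rightmost "A" in the pattern is index 0,
#        so the bad-character shift would be 2 - 0 = 2 (computed but unused:
#        this implementation still scans every alignment)
#   i=2: mismatch; i=3: window "AB" matches        -> record 3; i=4: mismatch
# giving positions == [0, 3].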
| 110 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Any = ort.SessionOptions()
_snake_case : Union[str, Any] = False
return options
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            """CompVis/stable-diffusion-v1-4""", revision="""onnx""", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )

        prompt = """A red cat sitting on a park bench"""

        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="""np""", )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 64 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling(TaskTemplate ):
    task: str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"

    @property
    def column_mapping(self ) -> Dict[str, str]:
        return {self.text_column: "text"}
| 54 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
A_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_snake_case : int = value
elif weight_type == "weight_g":
_snake_case : str = value
elif weight_type == "weight_v":
_snake_case : Tuple = value
elif weight_type == "bias":
_snake_case : List[str] = value
else:
_snake_case : int = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights_wavaveca(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif name.split(""".""" )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(F"Unused weights: {unused_weights}" )
return proj_weight
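# Editor's note: how the "*" wildcard in MAPPING above gets resolved. The layer
# index is recovered from the fairseq name and substituted into the HF template
# (this mirrors the split logic inside recursively_load_weights_wavaveca):
#
#   fairseq_name = "encoder.layers.3.self_attn.k_proj.weight"
#   key          = "self_attn.k_proj"
#   layer_index  = fairseq_name.split(key)[0].split(".")[-2]       # -> "3"
#   hf_name      = "encoder.layers.*.attention.k_proj".replace("*", layer_index)
#   # -> "encoder.layers.3.attention.k_proj"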
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
        feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
        feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path ):
    with open(dict_path , """r""" , encoding="""utf-8""" ) as f:
        lines = f.readlines()
        words = [line.split(""" """ )[0] for line in lines]

    num_words = len(words )
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }

    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=True , )

    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )

    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )

    # set output linear layer
    unexpected_keys.remove("""embed_out""" )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )

    vocab_dict = create_vocab_dict(dict_path )

    with open(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) , """w""" ) as fp:
        json.dump(vocab_dict , fp )

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )

    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = """speech_to_text_2"""
    config["""feature_extractor_type"""] = """wav2vec2"""

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )

    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
A_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
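# Hedged usage sketch (not part of the conversion script above): loading a checkpoint
# written by the script only needs the standard transformers classes. The dump folder
# path and the silent dummy waveform are illustrative assumptions.
import numpy as np
from transformers import SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

def sketch_run_converted_model(dump_folder="./converted_model"):
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(dump_folder)
    model = SpeechEncoderDecoderModel.from_pretrained(dump_folder)
    # one second of silence at 16 kHz stands in for real audio
    inputs = feature_extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="pt")
    return model.generate(inputs.input_values)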
| 64 | 0 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCamelCase_ : Optional[Any] = """scheduler_config.json"""
class __A ( __a ):
"""simple docstring"""
__lowerCAmelCase = 1
__lowerCAmelCase = 2
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = 5
__lowerCAmelCase = 6
__lowerCAmelCase = 7
__lowerCAmelCase = 8
__lowerCAmelCase = 9
__lowerCAmelCase = 10
__lowerCAmelCase = 11
__lowerCAmelCase = 12
__lowerCAmelCase = 13
__lowerCAmelCase = 14
@dataclass
class __A ( __a ):
"""simple docstring"""
__lowerCAmelCase = 42
class __A :
"""simple docstring"""
__lowerCAmelCase = SCHEDULER_CONFIG_NAME
__lowerCAmelCase = []
__lowerCAmelCase = True
@classmethod
def SCREAMING_SNAKE_CASE ( cls , __A = None , __A = None , __A=False , **__A , ) -> Union[str, Any]:
a =cls.load_config(
pretrained_model_name_or_path=a_ , subfolder=a_ , return_unused_kwargs=a_ , return_commit_hash=a_ , **a_ , )
return cls.from_config(a_ , return_unused_kwargs=a_ , **a_ )
def SCREAMING_SNAKE_CASE ( self , __A , __A = False , **__A ) -> Optional[int]:
self.save_config(save_directory=a_ , push_to_hub=a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return self._get_compatibles()
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> Union[str, Any]:
a =list(set([cls.__name__] + cls._compatibles ) )
a =importlib.import_module(__name__.split('''.''' )[0] )
a =[
getattr(a_ , a_ ) for c in compatible_classes_str if hasattr(a_ , a_ )
]
return compatible_classes | 81 |
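# Hedged round-trip sketch for the mixin above: save_pretrained() writes the
# scheduler_config.json named at the top of this file and from_pretrained() rebuilds the
# scheduler from it. DDPMScheduler is used only as a convenient concrete subclass.
import tempfile
from diffusers import DDPMScheduler

def sketch_scheduler_config_roundtrip():
    scheduler = DDPMScheduler(num_train_timesteps=1_000)
    with tempfile.TemporaryDirectory() as tmp_dir:
        scheduler.save_pretrained(tmp_dir)  # writes scheduler_config.json
        reloaded = DDPMScheduler.from_pretrained(tmp_dir)
    assert reloaded.config.num_train_timesteps == 1_000
    return reloaded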
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ = 16
A_ = 32
def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ):
"""simple docstring"""
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_snake_case : Any = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : Any ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : List[Any] = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : str = 16
elif accelerator.mixed_precision != "no":
_snake_case : Optional[int] = 8
else:
_snake_case : Optional[int] = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_snake_case : Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
_snake_case : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
_snake_case : List[Any] = 2
# Initialize accelerator
_snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Tuple = config["""lr"""]
_snake_case : str = int(config["""num_epochs"""] )
_snake_case : Union[str, Any] = int(config["""seed"""] )
_snake_case : Union[str, Any] = int(config["""batch_size"""] )
_snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case__ )
def inner_training_loop(snake_case__ : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ )
_snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate scheduler
_snake_case : str = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_snake_case : int = model(**snake_case__ )
_snake_case : str = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : int = model(**snake_case__ )
_snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
_snake_case : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_snake_case : Dict = parser.parse_args()
_snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
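# Hedged standalone sketch of the # New Code # pattern above: the decorator retries the
# wrapped function with a halved batch size whenever it sees an out-of-memory error.
# The RuntimeError below merely simulates CUDA OOM for illustration.
from accelerate.utils import find_executable_batch_size

def sketch_oom_retry():
    attempted = []

    @find_executable_batch_size(starting_batch_size=64)
    def inner(batch_size):
        attempted.append(batch_size)
        if batch_size > 16:
            raise RuntimeError("CUDA out of memory.")  # simulated OOM
        return batch_size

    assert inner() == 16
    assert attempted == [64, 32, 16]
    return attempted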
| 64 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def map ( dataset : datasets.Dataset , **kwargs ):
    _snake_case : Tuple = dataset.map(**kwargs )
@get_duration
def filter ( dataset : datasets.Dataset , **kwargs ):
    _snake_case : List[str] = dataset.filter(**kwargs )
def benchmark_map_filter ( ):
_snake_case : Dict = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case : Dict = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
_snake_case : List[Any] = generate_example_dataset(
os.path.join(snake_case__ , """dataset.arrow""" ) , snake_case__ , num_examples=snake_case__ )
_snake_case : List[Any] = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=snake_case__ )
def tokenize(SCREAMING_SNAKE_CASE__ : Optional[int] ):
return tokenizer(examples["""text"""] )
_snake_case : str = map(snake_case__ )
_snake_case : Optional[int] = map(snake_case__ , batched=snake_case__ )
_snake_case : int = map(snake_case__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=snake_case__ )
with dataset.formatted_as(type="""numpy""" ):
_snake_case : Dict = map(snake_case__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=snake_case__ )
with dataset.formatted_as(type="""pandas""" ):
_snake_case : List[str] = map(snake_case__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=snake_case__ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
_snake_case : Union[str, Any] = map(snake_case__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=snake_case__ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
_snake_case : List[str] = map(snake_case__ , function=lambda SCREAMING_SNAKE_CASE__ : None , batched=snake_case__ )
_snake_case : Dict = map(snake_case__ , function=snake_case__ , batched=snake_case__ )
_snake_case : List[str] = filter(snake_case__ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(snake_case__ , """wb""" ) as f:
f.write(json.dumps(snake_case__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
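# Hedged sketch of what the @get_duration decorator imported from the local utils module
# above plausibly does: time the wrapped call and return the elapsed seconds. The real
# helper is not shown in this file, so this is an assumed equivalent, not its source.
import time
from functools import wraps

def sketch_get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)  # the call's own return value is discarded
        return time.perf_counter() - start  # elapsed wall-clock seconds
    return wrapper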
| 317 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Any=7 ):
"""simple docstring"""
_snake_case : Any = None
if token is not None:
_snake_case : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
# The id of a workflow (not of a workflow run)
_snake_case : List[str] = """636036"""
_snake_case : Union[str, Any] = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
_snake_case : str = requests.get(snake_case__ , headers=snake_case__ ).json()
return result["workflow_runs"]
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : str = get_daily_ci_runs(snake_case__ )
_snake_case : str = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_snake_case : List[str] = workflow_run["""id"""]
break
return workflow_run_id
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Optional[Any] = get_last_daily_ci_runs(snake_case__ )
if workflow_run_id is not None:
_snake_case : Optional[Any] = get_artifacts_links(worflow_run_id=snake_case__ , token=snake_case__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_snake_case : Optional[int] = artifacts_links[artifact_name]
download_artifact(
artifact_name=snake_case__ , artifact_url=snake_case__ , output_dir=snake_case__ , token=snake_case__ )
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int ):
"""simple docstring"""
get_last_daily_ci_artifacts(snake_case__ , snake_case__ , snake_case__ )
_snake_case : int = {}
for artifact_name in artifact_names:
_snake_case : int = os.path.join(snake_case__ , F"{artifact_name}.zip" )
if os.path.isfile(snake_case__ ):
_snake_case : Tuple = {}
with zipfile.ZipFile(snake_case__ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case__ ):
# read the file
with z.open(snake_case__ ) as f:
_snake_case : Any = f.read().decode("""UTF-8""" )
return results
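# Hedged standalone sketch of the zip-reading core used above: collect every text member
# of an artifact zip into a {filename: contents} dict. Pure stdlib; unlike the
# os.path.isdir check above, the directory test here inspects the archive entry itself.
import zipfile

def sketch_read_artifact(zip_path):
    contents = {}
    with zipfile.ZipFile(zip_path) as z:
        for member in z.namelist():
            if not member.endswith("/"):  # skip directory entries
                with z.open(member) as f:
                    contents[member] = f.read().decode("UTF-8")
    return contents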
| 64 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class _lowerCamelCase( __a ):
lowercase_ : str = 0
lowercase_ : int = False
lowercase_ : Optional[int] = 3.0
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs(), {})
self.assertDictEqual(MockClass(a=2).to_kwargs(), {'a': 2})
self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
self.assertDictEqual(MockClass(a=2, c=2.2_5).to_kwargs(), {'a': 2, 'c': 2.2_5})
@require_cuda
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : int = GradScalerKwargs(init_scale=10_24, growth_factor=2)
AcceleratorState._reset_state()
_lowercase : str = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
print(accelerator.use_fpaa)
_lowercase : Union[str, Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale, 10_24.0)
self.assertEqual(scaler._growth_factor, 2.0)
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor, 0.5)
self.assertEqual(scaler._growth_interval, 20_00)
self.assertEqual(scaler._enabled, True)
@require_multi_gpu
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
execute_subprocess_async(a_, env=os.environ.copy())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
SCREAMING_SNAKE_CASE : Dict = Accelerator(kwargs_handlers=[ddp_scaler])
SCREAMING_SNAKE_CASE : str = torch.nn.Linear(100, 200)
SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
SCREAMING_SNAKE_CASE : List[Any] = ""
SCREAMING_SNAKE_CASE : Any = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
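# Hedged sketch of the KwargsHandler contract exercised by MockClass above: any dataclass
# subclass gets a to_kwargs() that reports only the fields differing from their defaults,
# which is what Accelerator uses to forward overrides. Field names here are illustrative.
from dataclasses import dataclass
from accelerate.utils import KwargsHandler

@dataclass
class SketchHandler(KwargsHandler):
    bucket_cap_mb: int = 25
    find_unused_parameters: bool = False

assert SketchHandler().to_kwargs() == {}
assert SketchHandler(bucket_cap_mb=15).to_kwargs() == {"bucket_cap_mb": 15}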
| 21 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
A_ = logging.get_logger(__name__)
class lowercase:
'''simple docstring'''
lowercase__ = 42
lowercase__ = None
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Tuple, a_: int, a_: int, a_: str, **a_: Dict ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Union[str, Any], a_: List[str] ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def UpperCamelCase_ ( cls: Tuple ):
'''simple docstring'''
return f"`pip install {cls.pip_package or cls.name}`"
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "optuna"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_optuna_available()
def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: int, a_: str, **a_: List[str] ):
'''simple docstring'''
return run_hp_search_optuna(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Any ):
'''simple docstring'''
return default_hp_space_optuna(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "ray"
lowercase__ = "'ray[tune]'"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_ray_available()
def UpperCamelCase_ ( self: int, a_: Optional[Any], a_: int, a_: str, **a_: List[Any] ):
'''simple docstring'''
return run_hp_search_ray(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: Tuple ):
'''simple docstring'''
return default_hp_space_ray(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "sigopt"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_sigopt_available()
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: str, **a_: int ):
'''simple docstring'''
return run_hp_search_sigopt(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: List[str] ):
'''simple docstring'''
return default_hp_space_sigopt(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "wandb"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_wandb_available()
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: str, **a_: Union[str, Any] ):
'''simple docstring'''
return run_hp_search_wandb(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: Any ):
'''simple docstring'''
return default_hp_space_wandb(a_ )
A_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(snake_case__ ) > 0:
_snake_case : Any = available_backends[0].name
if len(snake_case__ ) > 1:
logger.info(
F"{len(snake_case__ )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
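# Hedged usage sketch: the backends registered above are what
# transformers.Trainer.hyperparameter_search dispatches to. The search space and trial
# count below are illustrative assumptions; the trainer must be built with model_init=...
# so that every trial starts from fresh weights.
def sketch_hp_search(trainer):
    def optuna_hp_space(trial):
        return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)}

    best_run = trainer.hyperparameter_search(
        hp_space=optuna_hp_space,
        backend="optuna",
        n_trials=5,
        direction="minimize",
    )
    return best_run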
| 64 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowerCamelCase = pytest.mark.integration
@require_faiss
class UpperCAmelCase ( __a ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : str = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[int]:
'''simple docstring'''
import faiss
snake_case : Dataset = self._create_dummy_dataset()
snake_case : Any = dset.map(
lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=a_ , keep_in_memory=a_ )
snake_case : List[Any] = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
snake_case : Optional[Any] = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
import faiss
snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
snake_case : Any = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
import faiss
snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
snake_case : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(a_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
snake_case : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
snake_case : Tuple = {"""acknowledged""": True}
mocked_bulk.return_value = [(True, None)] * 30
snake_case : List[str] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
snake_case : Dict = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=a_ )
snake_case : int = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class UpperCAmelCase ( __a ):
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
import faiss
snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
snake_case : List[Any] = np.zeros(5 , dtype=np.floataa )
snake_case : List[str] = 1
snake_case : str = index.search(a_ )
self.assertRaises(a_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
snake_case : Union[str, Any] = np.eye(5 , dtype=np.floataa )[::-1]
snake_case : Optional[Any] = index.search_batch(a_ )
self.assertRaises(a_ , index.search_batch , queries[0] )
snake_case : Optional[Any] = [scores[0] for scores in total_scores]
snake_case : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , a_ )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
import faiss
snake_case : List[str] = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
snake_case : Any = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(a_ ):
snake_case : Dict = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[Any]:
'''simple docstring'''
import faiss
snake_case : Any = faiss.IndexFlat(5 )
snake_case : Tuple = FaissIndex(custom_index=a_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
import faiss
snake_case : List[str] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file:
index.save(tmp_file.name )
snake_case : str = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
snake_case : int = np.zeros(5 , dtype=np.floataa )
snake_case : List[str] = 1
snake_case : List[Any] = index.search(a_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def UpperCamelCase ( __lowerCamelCase : List[str] ):
import faiss
snake_case : Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
snake_case : Any = """index.faiss"""
snake_case : int = f"""mock://{index_name}"""
index.save(snake_case__ , storage_options=mockfs.storage_options )
snake_case : Optional[Any] = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
snake_case : Union[str, Any] = np.zeros(5 , dtype=np.floataa )
snake_case : List[str] = 1
snake_case : str = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class UpperCAmelCase ( __a ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
snake_case : Tuple = Elasticsearch()
snake_case : Union[str, Any] = {"""acknowledged""": True}
snake_case : List[str] = ElasticSearchIndex(es_client=a_ )
mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
snake_case : str = """foo"""
snake_case : Dict = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
snake_case : Tuple = index.search(a_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
snake_case : Union[str, Any] = """foo"""
snake_case : Tuple = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
snake_case : int = index.search(a_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
snake_case : str = ["""foo""", """bar""", """foobar"""]
snake_case : Dict = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
snake_case : Tuple = index.search_batch(a_ )
snake_case : List[Any] = [scores[0] for scores in total_scores]
snake_case : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([1, 1, 1] , a_ )
# batched queries with timeout
snake_case : Optional[int] = ["""foo""", """bar""", """foobar"""]
snake_case : List[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
snake_case : Tuple = index.search_batch(a_ , request_timeout=30 )
snake_case : Union[str, Any] = [scores[0] for scores in total_scores]
snake_case : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ) , 0 )
self.assertListEqual([1, 1, 1] , a_ )
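# Hedged minimal sketch of the FaissIndex flow the tests above exercise: add vectors,
# run a single query, then a batched one. Requires faiss to be installed.
import numpy as np
from datasets.search import FaissIndex

def sketch_faiss_roundtrip():
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    scores, ids = index.search(np.eye(5, dtype=np.float32)[1])  # best match is row 1
    total_scores, total_ids = index.search_batch(np.eye(5, dtype=np.float32))
    return ids[0], [i[0] for i in total_ids]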
| 59 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "AutoImageProcessor"
lowercase__ = "AutoTokenizer"
def __init__( self: List[str], a_: List[str]=None, a_: Tuple=None, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""", a_, )
_snake_case : str = kwargs.pop("""feature_extractor""" )
_snake_case : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a_, a_ )
_snake_case : Dict = self.image_processor
_snake_case : Any = False
def __call__( self: Any, *a_: Any, **a_: Tuple ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*a_, **a_ )
_snake_case : Dict = kwargs.pop("""images""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""text""", a_ )
if len(a_ ) > 0:
_snake_case : Optional[int] = args[0]
_snake_case : Tuple = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_snake_case : Tuple = self.image_processor(a_, *a_, **a_ )
if text is not None:
_snake_case : Tuple = self.tokenizer(a_, **a_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
_snake_case : List[str] = encodings["""input_ids"""]
return inputs
def UpperCamelCase_ ( self: Optional[int], *a_: Tuple, **a_: List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_, **a_ )
def UpperCamelCase_ ( self: int, *a_: List[str], **a_: int ):
'''simple docstring'''
return self.tokenizer.decode(*a_, **a_ )
@contextmanager
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_snake_case : Any = True
_snake_case : Optional[int] = self.tokenizer
yield
_snake_case : int = self.image_processor
_snake_case : Optional[int] = False
def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: str=False, a_: Optional[Any]=None ):
'''simple docstring'''
if added_vocab is None:
_snake_case : Dict = self.tokenizer.get_added_vocab()
_snake_case : str = {}
while tokens:
_snake_case : Union[str, Any] = re.search(r"""<s_(.*?)>""", a_, re.IGNORECASE )
if start_token is None:
break
_snake_case : List[Any] = start_token.group(1 )
_snake_case : str = re.search(rf"</s_{key}>", a_, re.IGNORECASE )
_snake_case : Dict = start_token.group()
if end_token is None:
_snake_case : List[Any] = tokens.replace(a_, """""" )
else:
_snake_case : List[str] = end_token.group()
_snake_case : str = re.escape(a_ )
_snake_case : str = re.escape(a_ )
_snake_case : Union[str, Any] = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", a_, re.IGNORECASE )
if content is not None:
_snake_case : int = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_snake_case : List[Any] = self.tokenajson(a_, is_inner_value=a_, added_vocab=a_ )
if value:
if len(a_ ) == 1:
_snake_case : List[str] = value[0]
_snake_case : List[str] = value
else: # leaf nodes
_snake_case : Tuple = []
for leaf in content.split(r"""<sep/>""" ):
_snake_case : Tuple = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_snake_case : int = leaf[1:-2] # for categorical special tokens
output[key].append(a_ )
if len(output[key] ) == 1:
_snake_case : int = output[key][0]
_snake_case : Any = tokens[tokens.find(a_ ) + len(a_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=a_, added_vocab=a_ )
if len(a_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, )
return self.image_processor_class
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, )
return self.image_processor
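# Hedged usage sketch of the deprecation path handled in __call__ above: instead of the
# as_target_processor() context manager, pass images and text together. In the original
# Donut-style processor the tokenized target ids land under the "labels" key; that key
# name is an assumption here, since the assignment above is obfuscated.
def sketch_processor_call(processor, image, target_text):
    inputs = processor(images=image, text=target_text, return_tensors="pt")
    return inputs["labels"]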
| 64 | 0 |
'''simple docstring'''
def merge_sort ( collection : list ) -> list:
    """simple docstring"""
    def merge(left : list , right : list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase : Optional[Any] = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 47 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel (snake_case__ : list[float] ):
"""simple docstring"""
_snake_case : int = 0.00
_snake_case : int = 0
for resistor in resistors:
if resistor <= 0:
_snake_case : Dict = F"Resistor at index {index} has a negative or zero value!"
raise ValueError(snake_case__ )
first_sum += 1 / float(snake_case__ )
index += 1
return 1 / first_sum
def resistor_series (snake_case__ : list[float] ):
"""simple docstring"""
_snake_case : Union[str, Any] = 0.00
_snake_case : Any = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_snake_case : Any = F"Resistor at index {index} has a negative value!"
raise ValueError(snake_case__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
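# Hedged worked example of the formulas implemented above: 2, 4 and 6 ohm resistors
# combine to 2 + 4 + 6 = 12 ohms in series and to 1 / (1/2 + 1/4 + 1/6) = 12/11
# ≈ 1.0909 ohms in parallel. Computed inline so the check is self-contained.
def sketch_resistor_example():
    resistors = [2.0, 4.0, 6.0]
    series = sum(resistors)
    parallel = 1 / sum(1 / r for r in resistors)
    assert series == 12.0
    assert abs(parallel - 12 / 11) < 1e-9
    return series, parallel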
| 64 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __a ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = (DDIMParallelScheduler,)
_UpperCamelCase : List[str] = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
lowercase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**a_ )
return config
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**a_ )
lowercase = scheduler_class(**a_ )
lowercase , lowercase = 10, 0.0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for t in scheduler.timesteps:
lowercase = model(a_ , a_ )
lowercase = scheduler.step(a_ , a_ , a_ , a_ ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a_ )
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(steps_offset=1 )
lowercase = scheduler_class(**a_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def SCREAMING_SNAKE_CASE__ ( self ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=a_ , beta_end=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
self.check_over_configs(thresholding=a_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=a_ , prediction_type=a_ , sample_max_value=a_ , )
def SCREAMING_SNAKE_CASE__ ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=a_ , num_inference_steps=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=a_ , eta=a_ )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**a_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**a_ )
lowercase , lowercase = 10, 0.0
scheduler.set_timesteps(a_ )
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
lowercase = self.dummy_sample_deter + 0.1
lowercase = self.dummy_sample_deter - 0.1
lowercase = samplea.shape[0]
lowercase = torch.stack([samplea, samplea, samplea] , dim=0 )
lowercase = torch.arange(a_ )[0:3, None].repeat(1 , a_ )
lowercase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowercase = scheduler.batch_step_no_noise(a_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , a_ )
lowercase = torch.sum(torch.abs(a_ ) )
lowercase = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4_982 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.full_loop()
lowercase = torch.sum(torch.abs(a_ ) )
lowercase = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1E-2
assert abs(result_mean.item() - 0.223_967 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.full_loop(prediction_type='v_prediction' )
lowercase = torch.sum(torch.abs(a_ ) )
lowercase = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1E-2
assert abs(result_mean.item() - 0.0_684 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.full_loop(set_alpha_to_one=a_ , beta_start=0.01 )
lowercase = torch.sum(torch.abs(a_ ) )
lowercase = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1E-2
assert abs(result_mean.item() - 0.1_951 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.full_loop(set_alpha_to_one=a_ , beta_start=0.01 )
lowercase = torch.sum(torch.abs(a_ ) )
lowercase = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1E-2
assert abs(result_mean.item() - 0.1_941 ) < 1E-3
| 195 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
A_ = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = CodeGenTokenizer
def __init__( self: Union[str, Any], a_: List[Any]=None, a_: str=None, a_: str=None, a_: Dict="<|endoftext|>", a_: Tuple="<|endoftext|>", a_: str="<|endoftext|>", a_: List[Any]=False, **a_: List[str], ):
'''simple docstring'''
super().__init__(
a_, a_, tokenizer_file=a_, unk_token=a_, bos_token=a_, eos_token=a_, add_prefix_space=a_, **a_, )
if kwargs.pop("""add_bos_token""", a_ ):
_snake_case : str = kwargs.pop("""name_or_path""", """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
_snake_case : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space:
_snake_case : Dict = getattr(a_, pre_tok_state.pop("""type""" ) )
_snake_case : Dict = add_prefix_space
_snake_case : str = pre_tok_class(**a_ )
_snake_case : List[Any] = add_prefix_space
def UpperCamelCase_ ( self: Any, *a_: Any, **a_: int ):
'''simple docstring'''
_snake_case : Optional[int] = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Optional[Any], *a_: Any, **a_: List[str] ):
'''simple docstring'''
_snake_case : Dict = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Optional[int], a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : List[Any] = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
def UpperCamelCase_ ( self: str, a_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], a_: bool = False, a_: bool = None, a_: Optional[List[str]] = None, **a_: List[str], ):
'''simple docstring'''
_snake_case : Any = super().decode(
token_ids=a_, skip_special_tokens=a_, clean_up_tokenization_spaces=a_, **a_, )
if truncate_before_pattern is not None and len(a_ ) > 0:
_snake_case : List[str] = self.truncate(a_, a_ )
return decoded_text
def UpperCamelCase_ ( self: Dict, a_: Tuple, a_: Optional[Any] ):
'''simple docstring'''
def find_re(string , pattern , start_pos ):
    m = pattern.search(string , start_pos )
    return m.start() if m else -1
_snake_case : Tuple = [re.compile(a_, re.MULTILINE ) for pattern in truncate_before_pattern]
_snake_case : List[Any] = list(re.finditer("""^print""", a_, re.MULTILINE ) )
if len(a_ ) > 1:
_snake_case : int = completion[: prints[1].start()]
_snake_case : List[str] = list(re.finditer("""^def""", a_, re.MULTILINE ) )
if len(a_ ) > 1:
_snake_case : List[Any] = completion[: defs[1].start()]
_snake_case : int = 0
_snake_case : List[Any] = [
pos for pos in [find_re(a_, a_, a_ ) for terminal in terminals] if pos != -1
]
if len(a_ ) > 0:
return completion[: min(a_ )]
else:
return completion
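# Hedged usage sketch of the decode(..., truncate_before_pattern=...) hook implemented
# above: a generated completion is cut at the first match of any banned pattern, here a
# new top-level comment, a docstring opener, or a triple blank line. The checkpoint name
# matches the pretrained maps at the top of this file.
from transformers import AutoTokenizer

def sketch_truncated_decode(token_ids):
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    return tokenizer.decode(
        token_ids,
        skip_special_tokens=True,
        truncate_before_pattern=["^#", "^'''", "\n\n\n"],
    )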
| 64 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ) -> Optional[Any]:
debug_launcher(test_script.main )
def A__ ( self: List[str] ) -> Dict:
debug_launcher(test_ops.main )
| 345 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_snake_case : Tuple = 1_92
_snake_case : Any = 7_68
_snake_case : Any = 12
_snake_case : List[Any] = 3
_snake_case : int = [8_00, 13_33]
_snake_case : Tuple = False
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = 3_30
_snake_case : List[str] = 14
_snake_case : List[str] = 6
_snake_case : Union[str, Any] = 13_20
elif "yolos_s" in yolos_name:
_snake_case : Union[str, Any] = 3_84
_snake_case : List[str] = 15_36
_snake_case : Any = 12
_snake_case : Optional[int] = 6
elif "yolos_b" in yolos_name:
_snake_case : Dict = [8_00, 13_44]
_snake_case : str = 91
_snake_case : Optional[Any] = """huggingface/label-files"""
_snake_case : str = """coco-detection-id2label.json"""
_snake_case : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : List[str] = idalabel
_snake_case : List[str] = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosConfig , snake_case__ : bool = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: config.hidden_size, :]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[-config.hidden_size :, :]
_snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
if "backbone" in name:
_snake_case : str = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
_snake_case : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
_snake_case : str = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
_snake_case : str = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
_snake_case : Tuple = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_snake_case : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
_snake_case : str = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
_snake_case : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_snake_case : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_snake_case : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_snake_case : str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_snake_case : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_snake_case : int = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
_snake_case : Union[str, Any] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
_snake_case : str = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
_snake_case : Union[str, Any] = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosForObjectDetection ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_snake_case : List[str] = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
_snake_case : Optional[Any] = key.split(""".""" )
_snake_case : Optional[Any] = int(key_split[2] )
_snake_case : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_snake_case : str = val[:dim, :]
_snake_case : Optional[Any] = val[
dim : dim * 2, :
]
_snake_case : Optional[Any] = val[-dim:, :]
else:
_snake_case : Dict = val[:dim]
_snake_case : Any = val[dim : dim * 2]
_snake_case : Dict = val[-dim:]
else:
_snake_case : Tuple = val
return orig_state_dict
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Copy/paste/tweak the original YOLOS weights into our structure and verify the outputs."""
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 64 | 0 |
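For context, a minimal sketch of reloading a converted dump through the transformers API; the local folder path is a hypothetical value for --pytorch_dump_folder_path, not something the script above prescribes.

import requests
import torch
from PIL import Image
from transformers import YolosForObjectDetection, YolosImageProcessor

# hypothetical dump folder produced by the conversion script
model = YolosForObjectDetection.from_pretrained("./yolos-small")
image_processor = YolosImageProcessor.from_pretrained("./yolos-small")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# (1, num_queries, num_labels + 1) and (1, num_queries, 4)
print(outputs.logits.shape, outputs.pred_boxes.shape)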
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_roberta_prelayernorm""": [
        """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """RobertaPreLayerNormConfig""",
        """RobertaPreLayerNormOnnxConfig""",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roberta_prelayernorm"""] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roberta_prelayernorm"""] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roberta_prelayernorm"""] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 271 |
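The init file above follows the lazy-import pattern: nothing heavy is imported until an attribute is first accessed. Below is a simplified, self-contained sketch of the idea; it is an illustration, not the actual transformers _LazyModule implementation.

import importlib
import types

class MinimalLazyModule(types.ModuleType):
    """Resolve exported attributes on first access instead of importing every backend eagerly."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the defining submodule only now, then cache the attribute
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # subsequent lookups skip __getattr__
        return value

This is why a missing optional backend (torch, tf, flax) only fails when one of its classes is actually requested, not at package import time.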
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build the (old_name, new_name) pairs that map the original checkpoint to the HF layout."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection of the original checkpoint into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classification head, which the base ViTMSNModel does not use."""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the projection-head weights; only the target-encoder backbone is converted."""
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Convert an original ViT-MSN checkpoint into the 🤗 ViTMSNModel format and verify the outputs."""
    config = ViTMSNConfig()
    config.num_labels = 1000
    # ImageNet-1k label mapping
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 64 | 0 |
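As a follow-up, a hedged sketch of using a converted ViT-MSN backbone for feature extraction; the hub checkpoint id follows the published naming convention but should be treated as an assumption here.

import requests
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNModel

# checkpoint id assumed from the hub naming convention for the converted models
processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNModel.from_pretrained("facebook/vit-msn-small")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
features = outputs.last_hidden_state[:, 0]  # CLS token embedding, shape (1, hidden_size)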
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 144 |
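A short usage sketch for the classes this init exports; the checkpoint id and the logit indexing are assumptions based on the hub naming and the documented image-text-matching head.

import requests
import torch
from PIL import Image
from transformers import BridgeTowerProcessor, BridgeTowerForImageAndTextRetrieval

# checkpoint id assumed; substitute the one you actually use
processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
encoding = processor(image, "two cats sleeping on a couch", return_tensors="pt")
with torch.no_grad():
    outputs = model(**encoding)
match_score = outputs.logits[0, 1].item()  # index 1 = "match" class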
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings that differ in exactly one position; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until no merge is possible; return the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the pair merged, so neither implicant is prime on its own
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether an implicant covers a minterm: they may differ only at the implicant's '_' positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants first, then greedily cover the remaining minterms."""
    temp = []
    select = [0] * len(chart)
    # a column with a single 1 marks an essential prime implicant
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the implicant that covers the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff prime implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    """Run the Quine-McCluskey pipeline interactively."""
    no_of_variable = int(input("""Enter the no. of variables\n"""))
    minterms = [
        int(x)
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("""Prime Implicants are:""")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("""Essential Prime Implicants are:""")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 64 | 0 |
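A non-interactive worked example of the pipeline above, using the function names as fixed in this section; the minterm set is illustrative.

# f(a, b, c) with minterms 0, 1, 2, 5, 6, 7
minterms = [0, 1, 2, 5, 6, 7]
binary = decimal_to_binary(3, minterms)      # ['000', '001', '010', '101', '110', '111']
prime_implicants = check(binary)             # six implicants such as '00_', '0_0', '_01', ... (set order may vary)
chart = prime_implicant_chart(prime_implicants, binary)
# every minterm here is covered by exactly two implicants, so none is essential;
# the greedy phase of selection() therefore chooses a minimal cover
essential = selection(chart, prime_implicants)
print(essential)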