'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
    >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    """Compute the latent height/width for a requested image size: ceil(size / scale_factor**2) * scale_factor."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
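

# A quick sanity check of the rounding behaviour above (illustrative values):
# 768 is an exact multiple of 8**2 = 64, while 770 is not and gets rounded up:
#   get_new_h_w(768, 768) == (96, 96)
#   get_new_h_w(770, 770) == (104, 104)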
class KandinskyPipeline(DiffusionPipeline):
    """
    Text-to-image pipeline for Kandinsky 2.1, combining a multilingual CLIP text
    encoder, a conditioned UNet and a MoVQ image decoder.
    """

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """
        Offload all models to CPU using accelerate, significantly reducing memory usage:
        each submodule is moved to GPU only when its `forward` method is called.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """
        Offload all models to CPU with hooks, so each model is moved to GPU right before
        its `forward` runs and stays there until the next model executes.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """
        Return the device on which the pipeline's models execute; after CPU offloading is
        enabled this can only be inferred from Accelerate's module hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        """Generate images from a text prompt plus (negative) image embeddings produced by the prior."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
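

if __name__ == "__main__":
    # A minimal, self-contained sketch of the classifier-free-guidance update used
    # in __call__ above (illustrative tensors; shapes and values are made up):
    uncond = torch.zeros(1, 4, 8, 8)  # noise prediction without text conditioning
    text = torch.ones(1, 4, 8, 8)  # noise prediction with text conditioning
    guided = uncond + 4.0 * (text - uncond)  # guidance_scale = 4.0 pushes towards the text branch
    print(guided.mean().item())  # 4.0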
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file's columns come in a different order than the requested features
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
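

# A small in-memory round-trip sketch of the writer exercised above (the column
# values are made up):
if __name__ == "__main__":
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()
        buffer.seek(0)
        print(buffer.read().decode())  # one JSON object per line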
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a WMT dataset and save each split as plain-text .source/.target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
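
# Example invocation (arguments are illustrative):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# which writes train.source/train.target, val.source/val.target and
# test.source/test.target under the chosen directory.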
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # A flag without a following value becomes a boolean; values are cast to the
        # narrowest matching type (bool, int, float, str).
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
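

# For intuition, the conversion tested above maps argv-style pairs to a typed
# dict, roughly (a sketch, not the accelerate implementation):
#   ["--epochs", "3", "--do_train", "--learning_rate", "5e-5"]
#       -> {"epochs": 3, "do_train": True, "learning_rate": 5e-5}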
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config) -> None:
    """Copy the weight-normalized generator weights from the original checkpoint into the HF model."""
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
) -> None:
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
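
# Example invocation (all paths are hypothetical):
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan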
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    """Disable gradient updates for all parameters of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Return the best available torch device string, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current wall-clock time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
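

# A small usage sketch for the helpers above:
if __name__ == "__main__":
    device = get_device()
    model = torch.nn.Linear(4, 4).to(device)
    freeze_params(model)
    print(get_timestamp(), "all parameters frozen:", all(not p.requires_grad for p in model.parameters()))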
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
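
# Illustrative effect of the lazy module above: importing the package is cheap,
# and the torch-backed classes are only imported on first attribute access, e.g.
#   from transformers.models.pegasus_x import PegasusXConfig  # no torch import yet
#   from transformers.models.pegasus_x import PegasusXModel   # triggers the torch-backed import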
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        return {"pearsonr": float(pearsonr(references, predictions)[0])}
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BlenderbotSmall tokenizer, backed by HuggingFace's tokenizers library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create a mask of zeros; BlenderbotSmall does not make use of token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
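

# Behaviour sketch of the helpers above (5, 6, 7 are made-up token ids; bos/eos
# denote the special-token ids):
#   build_inputs_with_special_tokens([5, 6])       -> [bos, 5, 6, eos]
#   build_inputs_with_special_tokens([5, 6], [7])  -> [bos, 5, 6, eos, eos, 7, eos]
#   create_token_type_ids_from_sequences([5, 6])   -> [0, 0, 0, 0]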
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
__snake_case = get_logger(__name__)
class _PatchedModuleObj:
    """Set attributes on a shallow copy of a module's namespace, leaving the original module untouched."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """
    Patch a submodule attribute of an object, keeping all other submodules intact at all
    levels. For example, to patch "os.path.join" we also need to patch "os" and "os.path".
    """

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"

        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".

        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate the patch outside a `with` block."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Deactivate a patch started with `start`."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
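

# A usage sketch for patch_submodule (assumes a hypothetical module `mymod`
# that does `import os` at top level):
#   with patch_submodule(mymod, "os.path.join", lambda *p: "/patched"):
#       assert mymod.os.path.join("a", "b") == "/patched"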
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected height and width after resizing, given a single image or a
        batch (for a batch the per-dimension maximum is used, matching padding behaviour).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :Dict = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :int = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
UpperCamelCase__ :Tuple = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :int = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Optional[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Any = json.loads(f.read() )
UpperCamelCase__ :str = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :str = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :Optional[int] = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
UpperCamelCase__ :Tuple = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Optional[int] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :int = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :Dict = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Tuple = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 |
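# The tests above feed the image processor a COCO-style detection target; a
# minimal hand-written dict of that shape (values are illustrative only, not
# taken from the fixture file):
coco_target = {
    "image_id": 39769,
    "annotations": [
        {
            "bbox": [13.0, 22.0, 30.0, 40.0],  # [x, y, width, height]
            "category_id": 75,
            "area": 1200.0,
            "iscrowd": 0,
        }
    ],
}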
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset | 97 | 1 |
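# The reader above backs `datasets.Dataset.from_generator`; a minimal sketch
# of that public entry point (assumes the `datasets` package is installed):
from datasets import Dataset

def _gen():
    for i in range(3):
        yield {"idx": i, "text": f"example {i}"}

ds = Dataset.from_generator(_gen)
print(ds.column_names)  # ['idx', 'text']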
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> None:
'''simple docstring'''
for param in module.parameters():
UpperCamelCase__ :Dict = False
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase__ :Optional[int] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Dict = plt.imshow(__a )
fig.axes.get_xaxis().set_visible(__a )
fig.axes.get_yaxis().set_visible(__a )
plt.show()
def a ( ) -> str:
'''simple docstring'''
UpperCamelCase__ :int = datetime.now()
UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''' )
return timestamp | 97 |
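# What the first helper above does once deobfuscated: freeze a module by
# disabling gradients on every parameter. Minimal sketch:
import torch.nn as nn

layer = nn.Linear(4, 2)
for param in layer.parameters():
    param.requires_grad = False
assert not any(p.requires_grad for p in layer.parameters())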
'''simple docstring'''
__snake_case = 65521 # largest prime below 2**16, used as the Adler-32 modulus
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
return (b << 16) | a | 97 | 1 |
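# Quick cross-check of the routine above against the stdlib implementation
# (0x11E60398 is the well-known Adler-32 of b"Wikipedia"):
import zlib

assert zlib.adler32(b"Wikipedia") == 0x11E60398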
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__snake_case = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
__snake_case = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
__snake_case = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def a ( __a ) -> dict[str, int]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def a ( __a ) -> str:
'''simple docstring'''
return x[0]
def a ( __a ) -> str:
'''simple docstring'''
UpperCamelCase__ :Any = get_letter_count(__a )
UpperCamelCase__ :dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(__a )
UpperCamelCase__ :dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=__a )
UpperCamelCase__ :List[Any] = ''''''.join(freq_to_letter[freq] )
UpperCamelCase__ :Tuple = list(freq_to_letter_str.items() )
freq_pairs.sort(key=__a , reverse=__a )
UpperCamelCase__ :list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(__a )
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :List[Any] = get_frequency_order(__a )
UpperCamelCase__ :Any = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
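# A condensed, self-contained sketch of the match-score idea above; ties are
# broken by ETAOIN order via sort stability, which differs slightly from the
# original's two-stage tie-breaking:
from collections import Counter

ETAOIN_ORDER = "ETAOINSHRDLCUMWFGYPBVKJXQZ"

def match_score(message: str) -> int:
    counts = Counter(c for c in message.upper() if c in ETAOIN_ORDER)
    order = sorted(ETAOIN_ORDER, key=lambda c: -counts[c])
    common = sum(c in order[:6] for c in ETAOIN_ORDER[:6])
    uncommon = sum(c in order[-6:] for c in ETAOIN_ORDER[-6:])
    return common + uncommon  # 0..12, higher means more English-like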
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
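# The module above relies on transformers' `_LazyModule`: heavy submodules
# are imported only on first attribute access. A stdlib-only sketch of the
# same idea (names here are illustrative, not the library's internals):
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve mapped attributes to their real modules on first access."""

    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, item):
        if item in self._attr_to_module:
            module = importlib.import_module(self._attr_to_module[item])
            return getattr(module, item)
        raise AttributeError(item)

lazy = LazyModule("demo", {"dataclass": "dataclasses"})
print(lazy.dataclass)  # importing `dataclasses` happens only here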
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
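# End-to-end sketch of what these tests exercise, against the published
# checkpoint (downloads the preprocessor config; the 800x1066 output shape
# assumes a 640x480 input, as in the COCO fixture):
from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained(
    "microsoft/conditional-detr-resnet-50"
)
image = Image.new("RGB", (640, 480))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])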
'''simple docstring'''
from math import sqrt
def a ( __a ) -> int:
'''simple docstring'''
# accumulate the sum of proper divisors, pairing each divisor i with n // i
UpperCamelCase__ :int = 0
for i in range(1 , int(sqrt(__a ) + 1 ) ):
if n % i == 0 and i != sqrt(__a ):
total += i + n // i
elif i == sqrt(__a ):
total += i
return total - n
def a ( __a = 10000 ) -> int:
'''simple docstring'''
UpperCamelCase__ :Dict = sum(
i
for i in range(1 , __a )
if sum_of_divisors(sum_of_divisors(__a ) ) == i and sum_of_divisors(__a ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 97 |
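# Worked check of the definition `solution` relies on: 220 and 284 form the
# smallest amicable pair, i.e. d(220) == 284 and d(284) == 220.
def _aliquot(n: int) -> int:
    return sum(i for i in range(1, n) if n % i == 0)

assert _aliquot(220) == 284 and _aliquot(284) == 220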
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we leave this task out of the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now try assigning this task to each eligible person, and recursively
# assign the remaining tasks with the updated mask.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
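# Brute-force cross-check of the bitmask DP on the sample input: count the
# ways to give the 3 persons distinct tasks from their allowed lists
# (expected result for this input: 10).
from itertools import permutations

allowed = [[1, 3, 4], [1, 2, 5], [3, 4]]
count = sum(
    all(task in allowed[person] for person, task in enumerate(perm))
    for perm in permutations(range(1, 6), len(allowed))
)
print(count)  # 10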
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
__a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char (assign back so the substitution takes effect)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
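# The CSV step above in isolation, with a stdlib pitfall handled: opening
# with newline="" prevents blank rows on Windows (the original omits it).
import csv

rows = [["1", "2021-01-01 00:00:00", "hello world"]]
with open("new_example_tweets.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["id", "created_at", "text"])
    writer.writerows(rows)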
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = 42
class lowercase ( A__ , A__ ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCamelCase_ = 65536 , UpperCamelCase_ = None , UpperCamelCase_ = 2 , UpperCamelCase_ = 2 , UpperCamelCase_ = 0 , UpperCamelCase_ = "fourier" , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = 0.0 , UpperCamelCase_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCamelCase_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCamelCase_ = "UNetMidBlock1D" , UpperCamelCase_ = None , UpperCamelCase_ = (32, 32, 64) , UpperCamelCase_ = None , UpperCamelCase_ = 8 , UpperCamelCase_ = 1 , UpperCamelCase_ = False , ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :int = sample_size
# time
if time_embedding_type == "fourier":
UpperCamelCase__ :Any = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCamelCase_ , log=UpperCamelCase_ , flip_sin_to_cos=UpperCamelCase_ )
UpperCamelCase__ :str = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCamelCase__ :int = Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCamelCase_ , downscale_freq_shift=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = block_out_channels[0]
if use_timestep_embedding:
UpperCamelCase__ :Optional[Any] = block_out_channels[0] * 4
UpperCamelCase__ :Optional[Any] = TimestepEmbedding(
in_channels=UpperCamelCase_ , time_embed_dim=UpperCamelCase_ , act_fn=UpperCamelCase_ , out_dim=block_out_channels[0] , )
UpperCamelCase__ :int = nn.ModuleList([] )
UpperCamelCase__ :Tuple = None
UpperCamelCase__ :Optional[int] = nn.ModuleList([] )
UpperCamelCase__ :Tuple = None
# down
UpperCamelCase__ :Dict = in_channels
for i, down_block_type in enumerate(UpperCamelCase_ ):
UpperCamelCase__ :Optional[Any] = output_channel
UpperCamelCase__ :Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCamelCase__ :Any = i == len(UpperCamelCase_ ) - 1
UpperCamelCase__ :Optional[Any] = get_down_block(
UpperCamelCase_ , num_layers=UpperCamelCase_ , in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCamelCase_ )
# mid
UpperCamelCase__ :Optional[Any] = get_mid_block(
UpperCamelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCamelCase_ , add_downsample=UpperCamelCase_ , )
# up
UpperCamelCase__ :Any = list(reversed(UpperCamelCase_ ) )
UpperCamelCase__ :List[str] = reversed_block_out_channels[0]
if out_block_type is None:
UpperCamelCase__ :Dict = out_channels
else:
UpperCamelCase__ :Optional[int] = block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase_ ):
UpperCamelCase__ :Tuple = output_channel
UpperCamelCase__ :Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(UpperCamelCase_ ) - 1 else final_upsample_channels
)
UpperCamelCase__ :Dict = i == len(UpperCamelCase_ ) - 1
UpperCamelCase__ :Optional[int] = get_up_block(
UpperCamelCase_ , num_layers=UpperCamelCase_ , in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = output_channel
# out
UpperCamelCase__ :int = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
UpperCamelCase__ :str = get_out_block(
out_block_type=UpperCamelCase_ , num_groups_out=UpperCamelCase_ , embed_dim=block_out_channels[0] , out_channels=UpperCamelCase_ , act_fn=UpperCamelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = True , ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = timestep
if not torch.is_tensor(UpperCamelCase_ ):
UpperCamelCase__ :Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0:
UpperCamelCase__ :str = timesteps[None].to(sample.device )
UpperCamelCase__ :int = self.time_proj(UpperCamelCase_ )
if self.config.use_timestep_embedding:
UpperCamelCase__ :Union[str, Any] = self.time_mlp(UpperCamelCase_ )
else:
UpperCamelCase__ :Optional[int] = timestep_embed[..., None]
UpperCamelCase__ :List[Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
UpperCamelCase__ :Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
UpperCamelCase__ :Optional[int] = ()
for downsample_block in self.down_blocks:
UpperCamelCase__ , UpperCamelCase__ :Tuple = downsample_block(hidden_states=UpperCamelCase_ , temb=UpperCamelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCamelCase__ :Optional[int] = self.mid_block(UpperCamelCase_ , UpperCamelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
UpperCamelCase__ :int = down_block_res_samples[-1:]
UpperCamelCase__ :Optional[int] = down_block_res_samples[:-1]
UpperCamelCase__ :List[Any] = upsample_block(UpperCamelCase_ , res_hidden_states_tuple=UpperCamelCase_ , temb=UpperCamelCase_ )
# 5. post-process
if self.out_block:
UpperCamelCase__ :List[str] = self.out_block(UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=UpperCamelCase_ ) | 97 |
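# Hedged forward-pass sketch against the public class this snippet mirrors
# (diffusers' UNet1DModel; the dance-diffusion style defaults used here are
# assumptions, and shapes may vary with the installed diffusers version):
import torch
from diffusers import UNet1DModel

model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
sample = torch.randn(1, 2, 65536)
out = model(sample, timestep=10)
print(out.sample.shape)  # torch.Size([1, 2, 65536])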
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
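# After conversion, the checkpoint is consumable through the public API
# (microsoft/table-transformer-detection is the published detection model;
# this downloads weights on first use):
from transformers import TableTransformerForObjectDetection

model = TableTransformerForObjectDetection.from_pretrained(
    "microsoft/table-transformer-detection"
)
print(model.config.id2label)  # {0: 'table', 1: 'table rotated'}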
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
    return [json.loads(line ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
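# Worked check of the truncation trick above (illustrative values): int(16**0.5)
# is 4 and 4 * 4 == 16, so 16 is a square; int(15**0.5) is 3 and 3 * 3 != 15.
assert int(16**0.5) ** 2 == 16
assert int(15**0.5) ** 2 != 15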
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
    UpperCamelCase__ :int = gcd(top , bottom )
top //= hcf
bottom //= hcf
return top, bottom
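# Worked example of the reduction above: 1/2 + 1/3 + 1/6 gives
# top = 1*3*6 + 1*2*6 + 1*2*3 = 36 and bottom = 2*3*6 = 36, so dividing
# both by gcd(36, 36) = 36 reduces the sum to (1, 1), i.e. exactly 1.
assert gcd(36 , 36 ) == 36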
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
                    UpperCamelCase__ :Tuple = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        UpperCamelCase__ :Any = int(sqrt(z_num ) )
                        UpperCamelCase__ :Optional[int] = int(sqrt(z_den ) )
                        UpperCamelCase__ :int = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
                    UpperCamelCase__ :List[str] = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(__a )
                    # n=-2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
                    if is_sq(z_num ) and is_sq(z_den ):
                        UpperCamelCase__ :str = int(sqrt(z_num ) )
                        UpperCamelCase__ :Any = int(sqrt(z_den ) )
                        UpperCamelCase__ :Dict = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(__a )
for num, den in unique_s:
        total += Fraction(num , den )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler | 97 |
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    UpperCamelCase__ :Union[str, Any] = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
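# Illustration of the digit indexing above on a short prefix of the
# Champernowne constant "123456789101112...": d(1) is '1', d(10) is the
# leading '1' of "10", and d(12) is the leading '1' of "11".
_prefix = ''''''.join(str(n ) for n in range(1 , 20 ) )
assert _prefix[0] == '''1''' and _prefix[9] == '''1''' and _prefix[11] == '''1'''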
if __name__ == "__main__":
print(solution()) | 97 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def a ( __a = None ) -> int:
'''simple docstring'''
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
UpperCamelCase__ :Optional[Any] = nums[0]
    for i in range(1 , len(nums ) ):
        UpperCamelCase__ :Tuple = nums[i]
        UpperCamelCase__ :Union[str, Any] = max(ans , ans + num , num )
return ans
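# Note the recurrence above tracks the best *subsequence* (elements need not
# be adjacent): max(ans, ans + num, num) keeps, extends, or restarts.
# A self-contained sketch with illustrative inputs:
def _demo_max_subsequence(nums ):
    best = nums[0]
    for num in nums[1:]:
        best = max(best , best + num , num )
    return best
assert _demo_max_subsequence([1, 2, 3, 4, -2] ) == 10 # sum of the positives
assert _demo_max_subsequence([-2, -3, -1] ) == -1 # largest single element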
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__snake_case = int(input('''Enter number of elements : ''').strip())
__snake_case = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array)) | 97 |
'''simple docstring'''
from PIL import Image
def a ( __a , __a ) -> Image:
'''simple docstring'''
def brightness(__a ) -> float:
        return 128 + level + (__a - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return img.point(brightness )
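# Note the point mapping above simplifies to c + level, since
# 128 + level + (c - 128) == c + level: with level = 100, a pixel value
# of 50 maps to 150 and 128 maps to 228.
assert 128 + 100 + (50 - 128) == 150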
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
        bright_img.save('''image_data/lena_brightness.png''', format='''png''')
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
"""simple docstring"""
_a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = TextaTextGenerationPipeline(model=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
return generator, ["Something to write", "Something else"]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = generator('''Something there''' )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': ANY(UpperCamelCase_ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
UpperCamelCase__ :Dict = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
[{'''generated_text''': ANY(UpperCamelCase_ )}, {'''generated_text''': ANY(UpperCamelCase_ )}],
[{'''generated_text''': ANY(UpperCamelCase_ )}, {'''generated_text''': ANY(UpperCamelCase_ )}],
] , )
UpperCamelCase__ :List[str] = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
[{'''generated_text''': ANY(UpperCamelCase_ )}, {'''generated_text''': ANY(UpperCamelCase_ )}],
[{'''generated_text''': ANY(UpperCamelCase_ )}, {'''generated_text''': ANY(UpperCamelCase_ )}],
] , )
with self.assertRaises(UpperCamelCase_ ):
generator(4 )
@require_torch
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
UpperCamelCase__ :Optional[Any] = generator('''Something there''' , do_sample=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': ''''''}] )
UpperCamelCase__ :Union[str, Any] = 3
UpperCamelCase__ :List[str] = generator(
'''Something there''' , num_return_sequences=UpperCamelCase_ , num_beams=UpperCamelCase_ , )
UpperCamelCase__ :Any = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[str] = generator('''This is a test''' , do_sample=UpperCamelCase_ , num_return_sequences=2 , return_tensors=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
UpperCamelCase__ :Dict = generator.model.config.eos_token_id
UpperCamelCase__ :Any = '''<pad>'''
UpperCamelCase__ :Tuple = generator(
['''This is a test''', '''This is a second test'''] , do_sample=UpperCamelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase_ , )
self.assertEqual(
UpperCamelCase_ , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
UpperCamelCase__ :Dict = generator('''Something there''' , do_sample=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': ''''''}] ) | 97 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
        UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : __a.created_at , reverse=__a )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__snake_case = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'ernie_m'
_a = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , UpperCamelCase_ = 250002 , UpperCamelCase_ = 768 , UpperCamelCase_ = 12 , UpperCamelCase_ = 12 , UpperCamelCase_ = 3072 , UpperCamelCase_ = "gelu" , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 0.1 , UpperCamelCase_ = 514 , UpperCamelCase_ = 0.02 , UpperCamelCase_ = 1 , UpperCamelCase_ = 1e-05 , UpperCamelCase_=None , UpperCamelCase_=False , UpperCamelCase_=0.0 , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Tuple = vocab_size
UpperCamelCase__ :List[Any] = hidden_size
UpperCamelCase__ :int = num_hidden_layers
UpperCamelCase__ :Optional[int] = num_attention_heads
UpperCamelCase__ :int = intermediate_size
UpperCamelCase__ :str = hidden_act
UpperCamelCase__ :str = hidden_dropout_prob
UpperCamelCase__ :Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase__ :Any = max_position_embeddings
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :str = layer_norm_eps
UpperCamelCase__ :Optional[Any] = classifier_dropout
UpperCamelCase__ :Union[str, Any] = is_decoder
UpperCamelCase__ :Union[str, Any] = act_dropout | 97 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
    __a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__snake_case = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def a ( __a , __a=100 , __a=" " ) -> List[str]:
'''simple docstring'''
    UpperCamelCase__ :Dict = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
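# Self-contained sketch of the chunking above with a small n (the real helper
# splits documents into ~100-word passages):
def _demo_split(text , n=3 , character=''' ''' ):
    words = text.split(character )
    return [character.join(words[i : i + n] ).strip() for i in range(0 , len(words ) , n )]
assert _demo_split('''a b c d e''' ) == ['''a b c''', '''d e''']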
def a ( __a ) -> dict:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = [], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '''''' )
                texts.append(passage )
return {"title": titles, "text": texts}
def a ( __a , __a , __a ) -> dict:
'''simple docstring'''
UpperCamelCase__ :str = ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=__a , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
UpperCamelCase__ :Union[str, Any] = ctx_encoder(input_ids.to(device=__a ) , return_dict=__a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def a ( __a , __a , __a , ) -> Any:
'''simple docstring'''
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCamelCase__ :str = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCamelCase__ :List[str] = dataset.map(__a , batched=__a , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCamelCase__ :int = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__a )
UpperCamelCase__ :str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCamelCase__ :str = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
UpperCamelCase__ :str = dataset.map(
partial(__a , ctx_encoder=__a , ctx_tokenizer=__a ) , batched=__a , batch_size=processing_args.batch_size , features=__a , )
# And finally save your dataset
UpperCamelCase__ :Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(__a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCamelCase__ :Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=__a )
# And save the index
UpperCamelCase__ :Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowercase :
"""simple docstring"""
_a = field(
default=str(Path(A__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
_a = field(
default=A__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
_a = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
_a = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
_a = field(
default=str(Path(A__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class lowercase :
"""simple docstring"""
_a = field(
default=A__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
_a = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class lowercase :
"""simple docstring"""
_a = field(
default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
_a = field(
default=1_28 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__snake_case = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__snake_case = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args) | 97 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
    UpperCamelCase__ :Tuple = datasets.load_dataset(dataset , pair )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
    UpperCamelCase__ :Dict = Path(save_dir )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = 42
_a = 42
_a = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker | 97 |
'''simple docstring'''
def a ( __a , __a ) -> int:
'''simple docstring'''
if len(__a ) != len(__a ):
raise ValueError('''String lengths must match!''' )
UpperCamelCase__ :Union[str, Any] = 0
    for char_a, char_b in zip(__a , __a ):
        if char_a != char_b:
count += 1
return count
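# Worked example: "karolin" and "kathrin" differ at exactly three positions
# (r/t, o/h, l/r), which the per-character comparison above counts:
assert sum(char_a != char_b for char_a, char_b in zip('''karolin''' , '''kathrin''' ) ) == 3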
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=A__ ):
"""simple docstring"""
_a = ['torch', 'scipy']
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''scipy'''] ) | 97 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
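# First values produced by the recurrence above, with one term spelled out:
# C(3) = C(0)*C(2) + C(1)*C(1) + C(2)*C(0) = 2 + 1 + 2 = 5, so
# catalan_numbers(5) yields [1, 1, 2, 5, 14, 42].
assert 1 * 2 + 1 * 1 + 2 * 1 == 5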
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'blip_2_vision_model'
def __init__( self , UpperCamelCase_=1408 , UpperCamelCase_=6144 , UpperCamelCase_=39 , UpperCamelCase_=16 , UpperCamelCase_=224 , UpperCamelCase_=14 , UpperCamelCase_="gelu" , UpperCamelCase_=0.00001 , UpperCamelCase_=0.0 , UpperCamelCase_=1e-10 , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :List[Any] = hidden_size
UpperCamelCase__ :int = intermediate_size
UpperCamelCase__ :Tuple = num_hidden_layers
UpperCamelCase__ :Dict = num_attention_heads
UpperCamelCase__ :List[str] = patch_size
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Any = initializer_range
UpperCamelCase__ :int = attention_dropout
UpperCamelCase__ :str = layer_norm_eps
UpperCamelCase__ :Optional[int] = hidden_act
UpperCamelCase__ :List[str] = qkv_bias
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :List[Any] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
UpperCamelCase__ :Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowercase ( A__ ):
"""simple docstring"""
_a = 'blip_2_qformer'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=0 , UpperCamelCase_="absolute" , UpperCamelCase_=2 , UpperCamelCase_=1408 , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :str = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Dict = hidden_act
UpperCamelCase__ :Any = intermediate_size
UpperCamelCase__ :Optional[Any] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :str = max_position_embeddings
UpperCamelCase__ :Union[str, Any] = initializer_range
UpperCamelCase__ :Dict = layer_norm_eps
UpperCamelCase__ :Union[str, Any] = position_embedding_type
UpperCamelCase__ :str = cross_attention_frequency
UpperCamelCase__ :List[str] = encoder_hidden_size
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :str = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
UpperCamelCase__ :Any = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowercase ( A__ ):
"""simple docstring"""
_a = 'blip-2'
_a = True
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=32 , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
UpperCamelCase__ :Optional[int] = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
UpperCamelCase__ :int = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
UpperCamelCase__ :List[str] = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
UpperCamelCase__ :Any = BlipaVisionConfig(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = BlipaQFormerConfig(**UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
UpperCamelCase__ :Any = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.text_config.tie_word_embeddings
UpperCamelCase__ :int = self.text_config.is_encoder_decoder
UpperCamelCase__ :List[str] = num_query_tokens
UpperCamelCase__ :Optional[int] = self.vision_config.hidden_size
UpperCamelCase__ :Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCamelCase__ :Optional[Any] = 1.0
UpperCamelCase__ :List[Any] = 0.02
@classmethod
def lowerCAmelCase__ ( cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ , ):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = copy.deepcopy(self.__dict__ )
UpperCamelCase__ :List[str] = self.vision_config.to_dict()
UpperCamelCase__ :Dict = self.qformer_config.to_dict()
UpperCamelCase__ :List[Any] = self.text_config.to_dict()
UpperCamelCase__ :Dict = self.__class__.model_type
return output | 97 |
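# Hedged usage sketch (assuming the upstream transformers names, Blip2Config
# and Blip2VisionConfig, for the classes defined above):
#     config = Blip2Config(num_query_tokens=32)
#     assert config.vision_config.hidden_size == 1408 # vision default above
#     assert config.qformer_config.encoder_hidden_size == 1408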
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
    return [json.loads(line ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
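# For reference, the pandas-style orient layouts the parametrizations above
# expect, sketched for a two-row table with columns "a" and "b":
# records: [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]
# split: {"columns": ["a", "b"], "data": [[1, "x"], [2, "y"]]}
# index: {"0": {"a": 1, "b": "x"}, "1": {"a": 2, "b": "y"}}
# columns: {"a": {"0": 1, "1": 2}, "b": {"0": "x", "1": "y"}}
# values: [[1, "x"], [2, "y"]]
# table: {"schema": {...}, "data": [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]}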
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = 42
class lowercase ( A__ , A__ ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCamelCase_ = 16 , UpperCamelCase_ = 88 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 1 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = 32 , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = "geglu" , UpperCamelCase_ = True , UpperCamelCase_ = True , ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[Any] = attention_head_dim
UpperCamelCase__ :Union[str, Any] = num_attention_heads * attention_head_dim
UpperCamelCase__ :Tuple = in_channels
UpperCamelCase__ :Tuple = torch.nn.GroupNorm(num_groups=UpperCamelCase_ , num_channels=UpperCamelCase_ , eps=1e-6 , affine=UpperCamelCase_ )
UpperCamelCase__ :int = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
# 3. Define transformers blocks
UpperCamelCase__ :Any = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dropout=UpperCamelCase_ , cross_attention_dim=UpperCamelCase_ , activation_fn=UpperCamelCase_ , attention_bias=UpperCamelCase_ , double_self_attention=UpperCamelCase_ , norm_elementwise_affine=UpperCamelCase_ , )
for d in range(UpperCamelCase_ )
] )
UpperCamelCase__ :str = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=1 , UpperCamelCase_=None , UpperCamelCase_ = True , ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = hidden_states.shape
UpperCamelCase__ :int = batch_frames // num_frames
UpperCamelCase__ :List[str] = hidden_states
UpperCamelCase__ :Tuple = hidden_states[None, :].reshape(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Tuple = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
UpperCamelCase__ :Tuple = self.norm(UpperCamelCase_ )
UpperCamelCase__ :Any = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.proj_in(UpperCamelCase_ )
# 2. Blocks
for block in self.transformer_blocks:
UpperCamelCase__ :Optional[Any] = block(
UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ , cross_attention_kwargs=UpperCamelCase_ , class_labels=UpperCamelCase_ , )
# 3. Output
UpperCamelCase__ :str = self.proj_out(UpperCamelCase_ )
UpperCamelCase__ :str = (
hidden_states[None, None, :]
.reshape(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
UpperCamelCase__ :List[str] = hidden_states.reshape(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = hidden_states + residual
if not return_dict:
return (output,)
        return TransformerTemporalModelOutput(sample=UpperCamelCase_ )
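# A minimal standalone sketch (torch only) of the spatial/temporal reshaping in
# the forward pass above: frames are folded out of the batch dimension, the
# spatial grid is folded into it, and attention then runs along the frame axis.
import torch

batch, frames, channels, height, width = 2, 4, 8, 3, 3
hidden = torch.randn(batch * frames, channels, height, width)
hidden = hidden[None, :].reshape(batch, frames, channels, height, width)
hidden = hidden.permute(0, 2, 1, 3, 4)  # (batch, channels, frames, height, width)
hidden = hidden.permute(0, 3, 4, 2, 1).reshape(batch * height * width, frames, channels)
assert hidden.shape == (batch * height * width, frames, channels)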
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
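# A hypothetical re-implementation (illustration only, not accelerate's actual
# code) of the kind of conversion the tests above exercise: "--key value" pairs
# become a dict, with bool/int/float inferred from the raw strings, and bare
# flags (a "--key" directly followed by another "--key") are rejected.
def convert_nargs_to_dict_sketch(args):
    result = {}
    for key, value in zip(args[::2], args[1::2]):
        if not key.startswith("--") or value.startswith("--"):
            raise ValueError(f"malformed key/value pair: {key} {value}")
        if value in ("True", "False"):
            value = value == "True"
        else:
            for cast in (int, float):
                try:
                    value = cast(value)
                    break
                except ValueError:
                    continue
        result[key[2:]] = value
    return result

assert convert_nargs_to_dict_sketch(["--epochs", "3", "--max_steps", "50.5"]) == {
    "epochs": 3,
    "max_steps": 50.5,
}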
'''simple docstring'''
from statistics import mean
import numpy as np
def a ( __a , __a , __a , __a ) -> list:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = 0
# Number of processes finished
UpperCamelCase__ :Any = 0
    # Tracks whether each process has finished:
    # 0 means the process has not been executed yet, 1 means it has finished.
UpperCamelCase__ :Union[str, Any] = [0] * no_of_process
# List to include calculation results
UpperCamelCase__ :Union[str, Any] = [0] * no_of_process
# Sort by arrival time.
UpperCamelCase__ :str = [burst_time[i] for i in np.argsort(__a )]
UpperCamelCase__ :Optional[int] = [process_name[i] for i in np.argsort(__a )]
arrival_time.sort()
while no_of_process > finished_process_count:
UpperCamelCase__ :Any = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
UpperCamelCase__ :List[Any] = arrival_time[i]
UpperCamelCase__ :Tuple = 0
# Index showing the location of the process being performed
UpperCamelCase__ :Dict = 0
# Saves the current response ratio.
UpperCamelCase__ :Optional[Any] = 0
for i in range(0 , __a ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
UpperCamelCase__ :List[str] = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
UpperCamelCase__ :Union[str, Any] = temp
UpperCamelCase__ :Optional[Any] = i
# Calculate the turn around time
UpperCamelCase__ :List[str] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
UpperCamelCase__ :Tuple = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def a ( __a , __a , __a , __a ) -> list:
'''simple docstring'''
UpperCamelCase__ :Dict = [0] * no_of_process
for i in range(0 , __a ):
UpperCamelCase__ :Optional[int] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
__snake_case = 5
__snake_case = ['''A''', '''B''', '''C''', '''D''', '''E''']
__snake_case = [1, 2, 3, 4, 5]
__snake_case = [1, 2, 3, 4, 5]
__snake_case = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__snake_case = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""") | 97 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> int:
'''simple docstring'''
for param in module.parameters():
UpperCamelCase__ :Dict = False
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase__ :Optional[int] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Dict = plt.imshow(__a )
fig.axes.get_xaxis().set_visible(__a )
fig.axes.get_yaxis().set_visible(__a )
plt.show()
def a ( ) -> str:
'''simple docstring'''
UpperCamelCase__ :int = datetime.now()
UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''' )
    return timestamp
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__snake_case = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
__snake_case = cvtColor(img, COLOR_BGR2GRAY)
def a ( ) -> int:
'''simple docstring'''
UpperCamelCase__ :List[Any] = cn.convert_to_negative(__a )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ) -> Optional[Any]:
'''simple docstring'''
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__a , 110 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :int = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
UpperCamelCase__ :Optional[Any] = canny.canny(__a )
# assert canny array for at least one True
assert canny_array.any()
def a ( ) -> Optional[Any]:
'''simple docstring'''
assert gg.gaussian_filter(__a , 5 , sigma=0.9 ).all()
def a ( ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
UpperCamelCase__ :Dict = conv.img_convolve(__a , __a ).astype(__a )
assert res.any()
def a ( ) -> Union[str, Any]:
'''simple docstring'''
assert med.median_filter(__a , 3 ).any()
def a ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Dict = sob.sobel_filter(__a )
assert grad.any() and theta.any()
def a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :str = sp.make_sepia(__a , 20 )
assert sepia.all()
def a ( __a = "digital_image_processing/image_data/lena_small.jpg" ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = bs.Burkes(imread(__a , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def a ( __a = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :int = rs.NearestNeighbour(imread(__a , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def a ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :Dict = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
UpperCamelCase__ :str = imread(__a , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :str = 0
UpperCamelCase__ :Tuple = image[x_coordinate][y_coordinate]
UpperCamelCase__ :int = lbp.get_neighbors_pixel(
__a , __a , __a , __a )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCamelCase__ :str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCamelCase__ :List[Any] = lbp.local_binary_value(__a , __a , __a )
    assert lbp_image.any()
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )} | 97 | 1 |
'''simple docstring'''
import os
from distutils.util import strtobool
def a ( __a , __a ) -> Union[str, Any]:
'''simple docstring'''
for e in env_keys:
UpperCamelCase__ :Optional[int] = int(os.environ.get(__a , -1 ) )
if val >= 0:
return val
return default
def a ( __a , __a=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = os.environ.get(__a , str(__a ) )
return strtobool(__a ) == 1 # As its name indicates `strtobool` actually returns an int...
def a ( __a , __a="no" ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = os.environ.get(__a , str(__a ) )
    return value
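# The spellings accepted by distutils' strtobool (case-insensitive): "y",
# "yes", "t", "true", "on" and "1" map to 1; "n", "no", "f", "false", "off"
# and "0" map to 0; anything else raises ValueError.
assert strtobool("yes") == 1 and strtobool("ON") == 1
assert strtobool("False") == 0 and strtobool("0") == 0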
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
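# A plain-Python sketch (hypothetical token ids) of the special-token layout
# built above: a single sequence becomes [bos] A [eos], a pair becomes
# [bos] A [eos] [eos] B [eos], and the token type ids are all zeros either way.
bos_id, eos_id = 0, 2  # hypothetical ids
tokens_a, tokens_b = [10, 11], [20]
single = [bos_id] + tokens_a + [eos_id]
pair = single + [eos_id] + tokens_b + [eos_id]
assert single == [0, 10, 11, 2]
assert pair == [0, 10, 11, 2, 2, 20, 2]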
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Dict = num_of_nodes
UpperCamelCase__ :list[list[int]] = []
UpperCamelCase__ :dict[int, int] = {}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCamelCase__ :Dict = self.find_component(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
UpperCamelCase__ :str = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCamelCase_ )
elif component_size[u_node] >= component_size[v_node]:
UpperCamelCase__ :Union[str, Any] = self.find_component(UpperCamelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = []
UpperCamelCase__ :List[Any] = 0
UpperCamelCase__ :list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCamelCase__ :Optional[int] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Tuple = edge
UpperCamelCase__ :List[Any] = self.m_component[u]
UpperCamelCase__ :str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCamelCase__ :Dict = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = edge
UpperCamelCase__ :Any = self.m_component[u]
UpperCamelCase__ :List[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
UpperCamelCase__ :List[Any] = [-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
    doctest.testmod()
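# A standalone sketch of the component lookup used by the Boruvka
# implementation above: follow parent pointers until a node is its own parent
# (no path compression, matching the class).
component = {0: 0, 1: 0, 2: 1, 3: 3}

def find_component_sketch(node: int) -> int:
    while component[node] != node:
        node = component[node]
    return node

assert find_component_sketch(2) == 0 and find_component_sketch(3) == 3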
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def a ( __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :str = np.inf
def set_batch_size(__a ) -> None:
nonlocal batch_size
if isinstance(__a , __a ):
UpperCamelCase__ :List[Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__a , __a ):
UpperCamelCase__ :int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__a , __a ) and feature.dtype == "binary":
UpperCamelCase__ :int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__a , __a )
return None if batch_size is np.inf else batch_size
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths}
UpperCamelCase__ :List[Any] = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
UpperCamelCase__ :Union[str, Any] = Parquet(
cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , hash=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Any = None
UpperCamelCase__ :List[Any] = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :str = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :int = dataset
UpperCamelCase__ :Optional[int] = path_or_buf
UpperCamelCase__ :Optional[Any] = batch_size or get_writer_batch_size(dataset.features )
UpperCamelCase__ :int = parquet_writer_kwargs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
UpperCamelCase__ :Any = self._write(file_obj=UpperCamelCase_ , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs )
else:
UpperCamelCase__ :Dict = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs )
return written
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = 0
UpperCamelCase__ :Any = parquet_writer_kwargs.pop('''path_or_buf''' , UpperCamelCase_ )
UpperCamelCase__ :Dict = self.dataset.features.arrow_schema
UpperCamelCase__ :List[Any] = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ , **UpperCamelCase_ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCamelCase_ ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
UpperCamelCase__ :Optional[Any] = query_table(
table=self.dataset._data , key=slice(UpperCamelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCamelCase_ )
written += batch.nbytes
writer.close()
        return written
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
        return dataset
'''simple docstring'''
from typing import Any
import numpy as np
def a ( __a ) -> bool:
'''simple docstring'''
return np.array_equal(__a , matrix.conjugate().T )
def a ( __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Dict = v.conjugate().T
UpperCamelCase__ :Optional[Any] = v_star.dot(__a )
assert isinstance(__a , np.ndarray )
return (v_star_dot.dot(__a )) / (v_star.dot(__a ))
def a ( ) -> None:
'''simple docstring'''
UpperCamelCase__ :Any = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
UpperCamelCase__ :Tuple = np.array([[1], [2], [3]] )
assert is_hermitian(__a ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(__a , __a ) )
UpperCamelCase__ :Any = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__a ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(__a , __a ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
    tests()
'''simple docstring'''
__snake_case = 65521
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
    return (b << 16) | a
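# Cross-check against the reference implementation in the standard library
# (assuming ASCII input, where ord(char) equals the encoded byte value):
import zlib

assert a("Wikipedia") == zlib.adler32(b"Wikipedia")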
'''simple docstring'''
import numpy as np
import datasets
__snake_case = '''
Compute the Mahalanobis Distance
The Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
__snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
__snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = np.array(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = np.array(UpperCamelCase_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
UpperCamelCase__ :int = X - np.mean(UpperCamelCase_ )
UpperCamelCase__ :List[str] = np.cov(reference_distribution.T )
try:
UpperCamelCase__ :Any = np.linalg.inv(UpperCamelCase_ )
except np.linalg.LinAlgError:
UpperCamelCase__ :Tuple = np.linalg.pinv(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = np.dot(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = np.dot(UpperCamelCase_ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 97 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
            ] )
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__snake_case = logging.getLogger(__name__)
@dataclass
class lowercase :
"""simple docstring"""
_a = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_a = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_a = field(
default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_a = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_a = field(default=A__ , metadata={'help': 'Whether tp freeze the encoder.'} )
_a = field(default=A__ , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class lowercase :
"""simple docstring"""
_a = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
_a = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
_a = field(
default=10_24 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_a = field(
default=1_28 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_a = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
_a = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_a = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
_a = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
_a = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
_a = field(default=A__ , metadata={'help': 'Source language id for translation.'} )
_a = field(default=A__ , metadata={'help': 'Target language id for translation.'} )
_a = field(default=A__ , metadata={'help': '# num_beams to use for evaluation.'} )
_a = field(
default=A__ , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
logger.info(f'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(f''' {key} = {metrics[key]}''' )
save_json(__a , os.path.join(__a , f'''{split}_results.json''' ) )
def a ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
check_output_dir(__a )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __a )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase__ :List[str] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__a , __a , __a ):
assert hasattr(__a , __a ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(__a , __a , getattr(__a , __a ) )
UpperCamelCase__ :Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase__ :int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__a , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase__ :Any = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__a , __a ):
UpperCamelCase__ :Union[str, Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase__ :List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__a )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCamelCase__ :str = SeqaSeqDataset
# Get datasets
UpperCamelCase__ :List[Any] = (
dataset_class(
__a , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
UpperCamelCase__ :List[str] = (
dataset_class(
__a , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase__ :Optional[int] = (
dataset_class(
__a , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase__ :List[str] = (
build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None
)
UpperCamelCase__ :Optional[int] = SeqaSeqTrainer(
model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=SeqaSeqDataCollator(
__a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , )
UpperCamelCase__ :Any = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
UpperCamelCase__ :List[str] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase__ :Any = train_result.metrics
UpperCamelCase__ :str = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __a , training_args.output_dir )
all_metrics.update(__a )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCamelCase__ :Optional[int] = trainer.evaluate(metric_key_prefix='''val''' )
UpperCamelCase__ :Union[str, Any] = data_args.n_val
UpperCamelCase__ :List[Any] = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __a , training_args.output_dir )
all_metrics.update(__a )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
UpperCamelCase__ :Any = trainer.predict(test_dataset=__a , metric_key_prefix='''test''' )
UpperCamelCase__ :Optional[int] = test_output.metrics
UpperCamelCase__ :Union[str, Any] = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase__ :str = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __a , training_args.output_dir )
all_metrics.update(__a )
if training_args.predict_with_generate:
UpperCamelCase__ :Dict = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
UpperCamelCase__ :List[str] = lmap(str.strip , __a )
write_txt_file(__a , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__a , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def a ( __a ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
    main()
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
'''simple docstring'''
def a ( __a = 50 ) -> int:
'''simple docstring'''
UpperCamelCase__ :Dict = [[0] * 3 for _ in range(length + 1 )]
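# different_colour_ways_number[n][t - 2] counts arrangements of a length-n row using
# at least one tile of length t (t in {2, 3, 4}); the "+ 1" below counts stopping
# after the tile just placed, i.e. leaving the rest of the row untiled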
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 |
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
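# mask has bit p set once person p holds a task; tasks are considered in increasing order (task_no is 1-based)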
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't assign this task to anyone in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :List[Any] = MobileBertConfig.from_json_file(__a )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase__ :List[str] = MobileBertForPreTraining(__a )
# Load weights from tf checkpoint
UpperCamelCase__ :str = load_tf_weights_in_mobilebert(__a , __a , __a )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path) | 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one (bail out early for accounts with no tweets at all)
if len(alltweets ) == 0:
return
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def a ( __a , __a , __a , __a=None ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = (path or []) + [u]
for v in graph[u]:
if not visited_edge[u][v]:
UpperCamelCase__ , UpperCamelCase__ :Any = True, True
UpperCamelCase__ :int = dfs(__a , __a , __a , __a )
return path
def a ( __a , __a ) -> str:
'''simple docstring'''
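# returns 1 for an Euler circuit (no odd-degree vertex), 2 for an Euler path (exactly two odd-degree vertices),
# 3 for neither; odd_node is an odd-degree start vertex when one exists (else -1)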
UpperCamelCase__ :Union[str, Any] = 0
UpperCamelCase__ :str = -1
for i in range(__a ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
UpperCamelCase__ :List[str] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def a ( __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
UpperCamelCase__ , UpperCamelCase__ :int = check_circuit_or_path(__a , __a )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
UpperCamelCase__ :Dict = 1
if check == 2:
UpperCamelCase__ :List[Any] = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
UpperCamelCase__ :Dict = dfs(__a , __a , __a )
print(__a )
def a ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :Dict = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
UpperCamelCase__ :Optional[int] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
UpperCamelCase__ :Optional[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
UpperCamelCase__ :List[str] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
UpperCamelCase__ :Optional[Any] = {
1: [],
2: []
# all degree is zero
}
UpperCamelCase__ :Tuple = 10
check_euler(__a , __a )
check_euler(__a , __a )
check_euler(__a , __a )
check_euler(__a , __a )
check_euler(__a , __a )
if __name__ == "__main__":
main() | 97 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
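# the model's hidden size is 256, so in_proj_weight stacks the query, key and value projections as rows 0-255, 256-511 and 512-767 respectively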
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
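# rescale so the longest image side matches the size the checkpoint was trained with: 800 for detection, 1000 for structure recognition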
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''') | 97 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=30 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10 , UpperCamelCase_=0.02 , UpperCamelCase_=3 , UpperCamelCase_=0.6 , UpperCamelCase_=None , ):
'''simple docstring'''
UpperCamelCase__ :int = parent
UpperCamelCase__ :List[str] = batch_size
UpperCamelCase__ :Optional[int] = image_size
UpperCamelCase__ :List[str] = patch_size
UpperCamelCase__ :int = num_channels
UpperCamelCase__ :Tuple = is_training
UpperCamelCase__ :Optional[int] = use_labels
UpperCamelCase__ :Optional[Any] = hidden_size
UpperCamelCase__ :List[str] = num_hidden_layers
UpperCamelCase__ :Optional[int] = num_attention_heads
UpperCamelCase__ :Optional[Any] = intermediate_size
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :str = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Optional[Any] = type_sequence_label_size
UpperCamelCase__ :Tuple = initializer_range
UpperCamelCase__ :int = mask_ratio
UpperCamelCase__ :Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ :Union[str, Any] = (image_size // patch_size) ** 2
UpperCamelCase__ :List[str] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Optional[Any] = None
if self.use_labels:
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ViTMAEModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Tuple = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ViTMAEForPreTraining(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[Any] = model(UpperCamelCase_ )
UpperCamelCase__ :str = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ :List[Any] = self.patch_size**2 * self.num_channels
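# the decoder reconstructs every patch, so the logits carry one row per patch with patch_size**2 * num_channels pixel values each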
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase__ :str = 1
UpperCamelCase__ :Dict = ViTMAEForPreTraining(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :str = model(UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = config_and_inputs
UpperCamelCase__ :str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_a = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = ViTMAEModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Tuple = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(UpperCamelCase_ )
UpperCamelCase__ :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Tuple = [*signature.parameters.keys()]
UpperCamelCase__ :Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ :Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase__ :List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ :int = torch.from_numpy(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ :List[Any] = pt_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :List[str] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ :List[str] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase__ :Optional[Any] = outputs[0].cpu().numpy()
UpperCamelCase__ :Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :List[str] = model_class.from_pretrained(UpperCamelCase_ )
model.to(UpperCamelCase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ :Any = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
# Make sure we don't have nans
UpperCamelCase__ :int = after_outputs[0].cpu().numpy()
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Any = ViTMAEModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def a ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ :Tuple = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.default_image_processor
UpperCamelCase__ :Dict = prepare_img()
UpperCamelCase__ :Tuple = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ :Optional[int] = ViTMAEConfig()
UpperCamelCase__ :List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase__ :Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase__ :Dict = model(**UpperCamelCase_ , noise=torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ ) )
# verify the logits
UpperCamelCase__ :str = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
UpperCamelCase__ :Dict = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase_ ) , atol=1e-4 ) ) | 97 |
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
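# concatenate the decimal expansions of 1, 2, 3, ...; appending the first 10**6 integers yields well over
# 10**6 digits of Champernowne's constant, so every index used below exists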
while len(constant ) < 1e6:
constant.append(str(i ) )
i += 1
UpperCamelCase__ :Union[str, Any] = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 | 1 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def a ( __a ) -> Any:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def a ( ) -> List[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def a ( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''mock-s3-bucket'''
UpperCamelCase__ :int = f'''s3://{mock_bucket}'''
UpperCamelCase__ :List[str] = extract_path_from_uri(__a )
assert dataset_path.startswith('''s3://''' ) is False
UpperCamelCase__ :Optional[int] = '''./local/path'''
UpperCamelCase__ :Union[str, Any] = extract_path_from_uri(__a )
assert dataset_path == new_dataset_path
def a ( __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = is_remote_filesystem(__a )
assert is_remote is True
UpperCamelCase__ :Union[str, Any] = fsspec.filesystem('''file''' )
UpperCamelCase__ :Optional[int] = is_remote_filesystem(__a )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __a )
def a ( __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :Tuple = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
UpperCamelCase__ :Union[str, Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCamelCase__ :Optional[int] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__a )
UpperCamelCase__ :Dict = fsspec.filesystem(compression_fs_class.protocol , fo=__a )
assert isinstance(__a , __a )
UpperCamelCase__ :str = os.path.basename(__a )
UpperCamelCase__ :Optional[Any] = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__a , '''r''' , encoding='''utf-8''' ) as f, open(__a , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :Tuple = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
UpperCamelCase__ :Any = compressed_file_paths[protocol]
UpperCamelCase__ :Union[str, Any] = '''dataset.jsonl'''
UpperCamelCase__ :str = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
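# fsspec URL chaining: "<protocol>://<member>::<archive>" addresses a member file inside a (compressed) archive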
UpperCamelCase__ , *UpperCamelCase__ :Dict = fsspec.get_fs_token_paths(__a )
assert fs.isfile(__a )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def a ( __a , __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :Any = hf_api.dataset_info(__a , token=__a )
UpperCamelCase__ :List[str] = HfFileSystem(repo_info=__a , token=__a )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__a ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__a , __a , clobber=__a )
with pytest.warns(__a ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__a ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
) | 97 |
'''simple docstring'''
from PIL import Image
def a ( __a , __a ) -> Image:
'''simple docstring'''
def brightness(__a ) -> float:
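# brightness(c) == c + level; the 128 terms cancel and are presumably kept to mirror a contrast-style formula 128 + factor * (c - 128)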
return 128 + level + (__a - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__a )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 | 1 |
'''simple docstring'''
def a ( __a , __a , __a=False ) -> str:
'''simple docstring'''
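# Jaccard similarity J(A, B) = |A intersection B| / |A union B|; with alternative_union the denominator |A| + |B| is used instead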
if isinstance(__a , __a ) and isinstance(__a , __a ):
UpperCamelCase__ :List[str] = len(set_a.intersection(__a ) )
if alternative_union:
UpperCamelCase__ :Optional[Any] = len(__a ) + len(__a )
else:
UpperCamelCase__ :Union[str, Any] = len(set_a.union(__a ) )
return intersection / union
if isinstance(__a , (list, tuple) ) and isinstance(__a , (list, tuple) ):
UpperCamelCase__ :Tuple = [element for element in set_a if element in set_b]
if alternative_union:
UpperCamelCase__ :Dict = len(__a ) + len(__a )
return len(__a ) / union
else:
UpperCamelCase__ :List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(__a ) / len(__a )
return None
if __name__ == "__main__":
__snake_case = {'''a''', '''b''', '''c''', '''d''', '''e'''}
__snake_case = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b)) | 97 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : i.created_at , reverse=__a )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
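# close the issue when the bot itself authored the latest comment and another week has passed without activity on a month-old issue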
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=UpperCamelCase_ , speech_processor=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , )
def lowerCAmelCase__ ( self , UpperCamelCase_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
UpperCamelCase__ :int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase_ )
@torch.no_grad()
def __call__( self , UpperCamelCase_ , UpperCamelCase_=16000 , UpperCamelCase_ = 512 , UpperCamelCase_ = 512 , UpperCamelCase_ = 50 , UpperCamelCase_ = 7.5 , UpperCamelCase_ = None , UpperCamelCase_ = 1 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = 1 , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :int = self.speech_processor.feature_extractor(
UpperCamelCase_ , return_tensors='''pt''' , sampling_rate=UpperCamelCase_ ).input_features.to(self.device )
UpperCamelCase__ :List[str] = self.speech_model.generate(UpperCamelCase_ , max_length=480000 )
UpperCamelCase__ :Optional[Any] = self.speech_processor.tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , normalize=UpperCamelCase_ )[
0
]
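# the Whisper transcription of the raw audio becomes the text prompt for the diffusion model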
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Dict = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Optional[Any] = len(UpperCamelCase_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(UpperCamelCase_ )}.''' )
# get prompt text embeddings
UpperCamelCase__ :Optional[int] = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCamelCase__ :List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase__ :Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase__ :Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase__ :Dict = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Any = text_embeddings.shape
UpperCamelCase__ :Dict = text_embeddings.repeat(1 , UpperCamelCase_ , 1 )
UpperCamelCase__ :Dict = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase__ :Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ :List[str]
if negative_prompt is None:
UpperCamelCase__ :Dict = [''''''] * batch_size
elif type(UpperCamelCase_ ) is not type(UpperCamelCase_ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase_ )} !='''
F''' {type(UpperCamelCase_ )}.''' )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Tuple = [negative_prompt]
elif batch_size != len(UpperCamelCase_ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase_ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
''' the batch size of `prompt`.''' )
else:
UpperCamelCase__ :int = negative_prompt
UpperCamelCase__ :Optional[Any] = text_input_ids.shape[-1]
UpperCamelCase__ :List[Any] = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ :Tuple = uncond_embeddings.shape[1]
UpperCamelCase__ :Any = uncond_embeddings.repeat(1 , UpperCamelCase_ , 1 )
UpperCamelCase__ :List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ :Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase__ :Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase__ :int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase__ :Optional[int] = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device='''cpu''' , dtype=UpperCamelCase_ ).to(
self.device )
else:
UpperCamelCase__ :Tuple = torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase__ :str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase__ :List[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ :List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase__ :Tuple = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ :List[Any] = {}
if accepts_eta:
UpperCamelCase__ :List[str] = eta
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ :int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ :int = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
UpperCamelCase__ :List[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = noise_pred.chunk(2 )
UpperCamelCase__ :int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ :Tuple = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = 1 / 0.18215 * latents
UpperCamelCase__ :Dict = self.vae.decode(UpperCamelCase_ ).sample
UpperCamelCase__ :Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase__ :List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase__ :Optional[int] = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCamelCase_ , nsfw_content_detected=UpperCamelCase_ ) | 97 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
__a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char (the result must be captured or the substitution is lost)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__snake_case = 16
__snake_case = 32
def a ( __a , __a = 16 ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCamelCase__ :List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :List[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase__ :Optional[int] = datasets.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase__ :int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase__ :Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase__ :int = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase__ :Union[str, Any] = 8
else:
UpperCamelCase__ :Any = None
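        # (Padding to a multiple of 8 keeps fp16/bf16 Tensor Core kernels efficient;
        # fp8 kernels prefer multiples of 16.)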
return tokenizer.pad(
__a , padding='''longest''' , max_length=__a , pad_to_multiple_of=__a , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCamelCase__ :Any = DataLoader(
tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
UpperCamelCase__ :Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__snake_case = mocked_dataloaders # noqa: F811
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __a ) == "1":
UpperCamelCase__ :Union[str, Any] = 2
# New Code #
UpperCamelCase__ :Union[str, Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
UpperCamelCase__ :int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__a )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Union[str, Any] = config['''lr''']
UpperCamelCase__ :Dict = int(config['''num_epochs'''] )
UpperCamelCase__ :str = int(config['''seed'''] )
UpperCamelCase__ :Optional[Any] = int(config['''batch_size'''] )
UpperCamelCase__ :Tuple = evaluate.load('''glue''' , '''mrpc''' )
set_seed(__a )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = get_dataloaders(__a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ :Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to come before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase__ :Any = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase__ :Union[str, Any] = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
UpperCamelCase__ :List[Any] = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=100 , num_training_steps=(len(__a ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = accelerator.prepare(
__a , __a , __a , __a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # Gradient accumulation on TPUs is currently unsupported and not advised, as bugs were found on the XLA side when running our tests.
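            # Under the hood, Accelerate skips the gradient synchronization and turns
            # `optimizer.step()`/`optimizer.zero_grad()` into no-ops on every batch
            # except the last one of each accumulation window.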
with accelerator.accumulate(__a ):
UpperCamelCase__ :Optional[Any] = model(**__a )
UpperCamelCase__ :List[Any] = output.loss
accelerator.backward(__a )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ :int = model(**__a )
UpperCamelCase__ :str = outputs.logits.argmax(dim=-1 )
UpperCamelCase__ , UpperCamelCase__ :List[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__a , references=__a , )
UpperCamelCase__ :int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __a )
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__a , default=__a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. '''
        '''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
        '''--gradient_accumulation_steps''' , type=__a , default=1 , help='''The number of minibatches to be run before the accumulated gradients are applied.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCamelCase__ :Tuple = parser.parse_args()
UpperCamelCase__ :List[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main() | 97 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
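# Typical invocation through python-fire (the script name below is assumed for illustration):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir ./wmt16-ro-en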
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__snake_case = logging.get_logger(__name__)
# General docstring
__snake_case = '''RegNetConfig'''
# Base docstring
__snake_case = '''facebook/regnet-y-040'''
__snake_case = [1, 1088, 7, 7]
# Image classification docstring
__snake_case = '''facebook/regnet-y-040'''
__snake_case = '''tabby, tabby cat'''
__snake_case = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = 3 , UpperCamelCase_ = 1 , UpperCamelCase_ = 1 , UpperCamelCase_ = "relu" , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
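        # An explicit ZeroPadding2D followed by a VALID convolution reproduces PyTorch's
        # symmetric `padding=kernel_size // 2`; TF's padding='SAME' can pad asymmetrically
        # depending on the input size and stride, so it would not always match.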
UpperCamelCase__ :List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase__ :Dict = tf.keras.layers.ConvaD(
filters=UpperCamelCase_ , kernel_size=UpperCamelCase_ , strides=UpperCamelCase_ , padding='''VALID''' , groups=UpperCamelCase_ , use_bias=UpperCamelCase_ , name='''convolution''' , )
UpperCamelCase__ :Tuple = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
UpperCamelCase__ :Dict = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.convolution(self.padding(UpperCamelCase_ ) )
UpperCamelCase__ :int = self.normalization(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.activation(UpperCamelCase_ )
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Dict = config.num_channels
UpperCamelCase__ :Union[str, Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = shape_list(UpperCamelCase_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
                '''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase__ :int = tf.transpose(UpperCamelCase_ , perm=(0, 2, 3, 1) )
UpperCamelCase__ :Dict = self.embedder(UpperCamelCase_ )
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = 2 , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = tf.keras.layers.ConvaD(
filters=UpperCamelCase_ , kernel_size=1 , strides=UpperCamelCase_ , use_bias=UpperCamelCase_ , name='''convolution''' )
UpperCamelCase__ :Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = False ):
'''simple docstring'''
return self.normalization(self.convolution(UpperCamelCase_ ) , training=UpperCamelCase_ )
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase_ , name='''pooler''' )
UpperCamelCase__ :str = [
tf.keras.layers.ConvaD(filters=UpperCamelCase_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=UpperCamelCase_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
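        # Classic squeeze-and-excitation: a global average pool ("squeeze") feeds a 1x1
        # bottleneck conv with ReLU and a 1x1 expansion conv with sigmoid ("excitation"),
        # whose output rescales the input channels in the call below.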
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = self.pooler(UpperCamelCase_ )
for layer_module in self.attention:
UpperCamelCase__ :Tuple = layer_module(UpperCamelCase_ )
UpperCamelCase__ :Tuple = hidden_state * pooled
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 1 , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = in_channels != out_channels or stride != 1
UpperCamelCase__ :Any = max(1 , out_channels // config.groups_width )
UpperCamelCase__ :Optional[Any] = (
TFRegNetShortCut(UpperCamelCase_ , stride=UpperCamelCase_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase__ :Optional[Any] = [
TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
UpperCamelCase_ , stride=UpperCamelCase_ , groups=UpperCamelCase_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=UpperCamelCase_ , name='''layer.2''' ),
]
UpperCamelCase__ :List[Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = hidden_state
for layer_module in self.layers:
UpperCamelCase__ :int = layer_module(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = self.shortcut(UpperCamelCase_ )
hidden_state += residual
UpperCamelCase__ :str = self.activation(UpperCamelCase_ )
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 1 , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = in_channels != out_channels or stride != 1
UpperCamelCase__ :int = max(1 , out_channels // config.groups_width )
UpperCamelCase__ :Tuple = (
TFRegNetShortCut(UpperCamelCase_ , stride=UpperCamelCase_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
UpperCamelCase__ :List[Any] = [
TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
UpperCamelCase_ , stride=UpperCamelCase_ , groups=UpperCamelCase_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(UpperCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(UpperCamelCase_ , kernel_size=1 , activation=UpperCamelCase_ , name='''layer.3''' ),
]
UpperCamelCase__ :int = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = hidden_state
for layer_module in self.layers:
UpperCamelCase__ :Optional[Any] = layer_module(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = self.shortcut(UpperCamelCase_ )
hidden_state += residual
UpperCamelCase__ :Dict = self.activation(UpperCamelCase_ )
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 2 , UpperCamelCase_ = 2 , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
UpperCamelCase__ :Optional[Any] = [
# downsampling is done in the first layer with stride of 2
layer(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ , name='''layers.0''' ),
*[layer(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase__ :List[str] = layer_module(UpperCamelCase_ )
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :int = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
UpperCamelCase__ :int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCamelCase_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , depth=UpperCamelCase_ , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = True ):
'''simple docstring'''
UpperCamelCase__ :Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase__ :List[Any] = hidden_states + (hidden_state,)
UpperCamelCase__ :Optional[Any] = stage_module(UpperCamelCase_ )
if output_hidden_states:
UpperCamelCase__ :Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase_ , hidden_states=UpperCamelCase_ )
@keras_serializable
class lowercase ( tf.keras.layers.Layer ):
"""simple docstring"""
_a = RegNetConfig
def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
UpperCamelCase__ :Any = config
UpperCamelCase__ :Dict = TFRegNetEmbeddings(UpperCamelCase_ , name='''embedder''' )
UpperCamelCase__ :Any = TFRegNetEncoder(UpperCamelCase_ , name='''encoder''' )
UpperCamelCase__ :Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase_ , name='''pooler''' )
@unpack_inputs
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ :Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ :List[str] = self.embedder(UpperCamelCase_ , training=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.encoder(
UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = encoder_outputs[0]
UpperCamelCase__ :str = self.pooler(UpperCamelCase_ )
        # Change to NCHW output format to have uniformity in the modules
UpperCamelCase__ :Tuple = tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) )
UpperCamelCase__ :List[str] = tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase__ :Union[str, Any] = tuple([tf.transpose(UpperCamelCase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase_ , pooler_output=UpperCamelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase ( A__ ):
"""simple docstring"""
_a = RegNetConfig
_a = 'regnet'
_a = 'pixel_values'
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__snake_case = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
__snake_case = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , A__ , )
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :str = TFRegNetMainLayer(UpperCamelCase_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=False , ):
'''simple docstring'''
UpperCamelCase__ :str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ :Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ :Tuple = self.regnet(
pixel_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , A__ , )
class lowercase ( A__ , A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :List[str] = config.num_labels
UpperCamelCase__ :Dict = TFRegNetMainLayer(UpperCamelCase_ , name='''regnet''' )
# classification head
UpperCamelCase__ :str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=False , ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ :List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ :List[str] = self.regnet(
UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ , training=UpperCamelCase_ )
UpperCamelCase__ :str = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase__ :Tuple = self.classifier[0](UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.classifier[1](UpperCamelCase_ )
UpperCamelCase__ :Dict = None if labels is None else self.hf_compute_loss(labels=UpperCamelCase_ , logits=UpperCamelCase_ )
if not return_dict:
UpperCamelCase__ :Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCamelCase_ , logits=UpperCamelCase_ , hidden_states=outputs.hidden_states ) | 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=99 , UpperCamelCase_=13 , UpperCamelCase_=16 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=2 , UpperCamelCase_=32 , UpperCamelCase_=4 , UpperCamelCase_=4 , UpperCamelCase_=30 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=None , ):
'''simple docstring'''
UpperCamelCase__ :List[str] = parent
UpperCamelCase__ :Optional[int] = batch_size
UpperCamelCase__ :Any = decoder_seq_length
# For common tests
UpperCamelCase__ :List[Any] = self.decoder_seq_length
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :List[str] = use_attention_mask
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :Any = vocab_size
UpperCamelCase__ :Optional[Any] = d_model
UpperCamelCase__ :Any = d_model
UpperCamelCase__ :Tuple = decoder_layers
UpperCamelCase__ :List[Any] = decoder_layers
UpperCamelCase__ :Optional[Any] = decoder_ffn_dim
UpperCamelCase__ :int = decoder_attention_heads
UpperCamelCase__ :List[str] = decoder_attention_heads
UpperCamelCase__ :Optional[Any] = eos_token_id
UpperCamelCase__ :Any = bos_token_id
UpperCamelCase__ :Optional[Any] = pad_token_id
UpperCamelCase__ :List[str] = decoder_start_token_id
UpperCamelCase__ :Dict = use_cache
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Tuple = None
UpperCamelCase__ :List[Any] = decoder_seq_length
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :List[str] = 1
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCamelCase__ :Union[str, Any] = None
if self.use_attention_mask:
UpperCamelCase__ :Dict = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
UpperCamelCase__ :str = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCamelCase__ :Tuple = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :int = True
UpperCamelCase__ :int = TrOCRDecoder(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval()
UpperCamelCase__ :Any = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
UpperCamelCase__ :List[Any] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = model(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) + 1 )
UpperCamelCase__ :Optional[int] = outputs['''past_key_values''']
        # create a hypothetical next token and extend next_input_ids with it
UpperCamelCase__ :Union[str, Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
UpperCamelCase__ :Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Dict = model(UpperCamelCase_ )['''last_hidden_state''']
UpperCamelCase__ :Dict = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ )['''last_hidden_state''']
# select random slice
UpperCamelCase__ :Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :Optional[Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
UpperCamelCase__ :Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = config_and_inputs
UpperCamelCase__ :Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_a = (TrOCRForCausalLM,) if is_torch_available() else ()
_a = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
_a = True
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase_ )
UpperCamelCase__ :Dict = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass | 97 |
'''simple docstring'''
def hamming_distance( string_a , string_b ) -> int:
    '''simple docstring'''
    if len(string_a ) != len(string_b ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
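# Example: hamming_distance('''karolin''' , '''kathrin''') returns 3, the classic
# Hamming-distance pair (three positions differ).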
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
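# Quick sanity check: catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].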
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct , x , y , z = symbols('''ct x y z''')
def beta( velocity ) -> float:
'''simple docstring'''
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def gamma( velocity ) -> float:
    '''simple docstring'''
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix( velocity ) -> np.ndarray:
    '''simple docstring'''
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
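# For a boost along x this is the standard Lorentz matrix acting on (ct, x, y, z):
#   [[ g, -g*b, 0, 0],
#    [-g*b, g,  0, 0],
#    [ 0,   0,  1, 0],
#    [ 0,   0,  0, 1]]   where g = gamma(velocity) and b = beta(velocity).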
def transform( velocity , event=None ) -> np.ndarray:
    '''simple docstring'''
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
four_vector = transform(29979245)
print('''Example of four vector: ''')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
sub_dict = {ct: c, x: 1, y: 1, z: 1}
numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""") | 97 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ) -> None:
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def load_json_lines( buffer ) -> list:
'''simple docstring'''
    return [json.loads(line ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
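    # The `orient` values above mirror pandas' `DataFrame.to_json` layouts:
    # 'records' is a list of row dicts, 'split' separates column names from the
    # data, 'table' embeds a JSON Table Schema, and so on.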
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__snake_case = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''LayoutLMv2FeatureExtractor''']
__snake_case = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 1 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
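# STR_OPERATION_TO_FUNC maps comparison strings such as ">=" to the matching
# `operator` callables (e.g. operator.ge), so a version check can be written as text:
# compare_versions(parse("2.1.0"), ">=", "2.0") below evaluates to True.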
__snake_case = parse(importlib.metadata.version('''torch'''))
def compare_versions( library_or_version , operation , requirement_version ) -> bool:
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation , version ) -> bool:
    '''simple docstring'''
    return compare_versions(__snake_case , operation , version ) | 97 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> int:
'''simple docstring'''
for param in module.parameters():
UpperCamelCase__ :Dict = False
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase__ :Optional[int] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Dict = plt.imshow(__a )
fig.axes.get_xaxis().set_visible(__a )
fig.axes.get_yaxis().set_visible(__a )
plt.show()
def a ( ) -> str:
'''simple docstring'''
UpperCamelCase__ :int = datetime.now()
UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''' )
return timestamp | 97 | 1 |
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def a ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase__ :List[Any] = parser.parse_args()
return args.f
class lowercase ( A__ ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
            args.insert(0 , '''run_glue_deebert.py''' )
            with patch.object(sys , '''argv''' , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        testargs = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(testargs )
        testargs = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(testargs )
        testargs = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(testargs ) | 97 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
    Example 2-The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def lowerCAmelCase__ ( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        if return_pvalue:
            results = pearsonr(predictions , references )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(predictions , references )[0] )} | 97 | 1 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class lowercase ( A__ ):
"""simple docstring"""
    def __init__( self , path="" , prefix="train" ):
        '''simple docstring'''
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )
def __len__( self ):
'''simple docstring'''
return len(self.documents )
    def __getitem__( self , idx ):
        '''simple docstring'''
        document_path = self.documents[idx]
        document_name = document_path.split('''/''' )[-1]
        with open(document_path , encoding='''utf-8''' ) as source:
            raw_story = source.read()
            story_lines , summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def process_story ( raw_story ) -> Tuple:
    '''simple docstring'''
    nonempty_lines = list(filter(lambda x : len(x ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('''@highlight''' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith('''@highlight''' ) , lines ) )
    return story_lines, summary_lines
def _add_missing_period ( line ) -> Optional[Any]:
    '''simple docstring'''
    END_TOKENS = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u201d''', ''')''']
    if line.startswith('''@highlight''' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size ( sequence , block_size , pad_token_id ) -> Optional[int]:
    '''simple docstring'''
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def build_mask ( sequence , pad_token_id ) -> torch.Tensor:
    '''simple docstring'''
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization ( story_lines , summary_lines , tokenizer ) -> Tuple:
    '''simple docstring'''
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids ( batch , separator_token_id ) -> Dict:
    '''simple docstring'''
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings ) | 97 |
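A short worked example of the padding and masking helpers repaired above:
import torch
ids = torch.tensor(fit_to_block_size([5, 6, 7], block_size=6, pad_token_id=0))
mask = build_mask(ids, 0)
print(mask.tolist())  # [1, 1, 1, 0, 0, 0]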
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def lowerCAmelCase__ ( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def lowerCAmelCase__ ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0] | 97 | 1 |
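A hedged sketch of the special-token layout produced by the first method above (build_inputs_with_special_tokens upstream; the token ids here are hypothetical placeholders):
bos_id, eos_id = 1, 2
tokens_a, tokens_b = [10, 11], [20]
single = [bos_id] + tokens_a + [eos_id]
pair = single + [eos_id] + tokens_b + [eos_id]
print(single)  # [1, 10, 11, 2]
print(pair)    # [1, 10, 11, 2, 2, 20, 2]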
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model ( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ) -> Dict:
    '''simple docstring'''
    load_in_8bit = bnb_quantization_config.load_in_8bit
    load_in_4bit = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
# convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    module_name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    param = getattr(model , module_name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            model , device_map , weights_location , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map ( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> Dict:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
            device_map = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(device_map , str ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
        kwargs = {}
        kwargs['''special_dtypes'''] = special_dtypes
        kwargs['''no_split_module_classes'''] = no_split_module_classes
        kwargs['''dtype'''] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == '''balanced_low_0''') , max_memory=max_memory , **kwargs , )
        kwargs['''max_memory'''] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> List[Any]:
    '''simple docstring'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Any:
    '''simple docstring'''
    has_been_replaced = False
for name, module in model.named_children():
if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
if isinstance(__a , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
                    proceed = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert ( model ) -> Union[str, Any]:
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers ( model ) -> bool:
    '''simple docstring'''
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device ( parameter ) -> Any:
    '''simple docstring'''
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit ( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ) -> None:
    '''simple docstring'''
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('''.''' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f'''{module} has no attribute {split}.''' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) ) | 97 |
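A hedged end-to-end sketch of how these utilities are normally driven through the public accelerate API (the checkpoint path is a placeholder, and the model must be built under init_empty_weights):
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# with init_empty_weights():
#     empty_model = build_model()  # hypothetical constructor for your architecture
# quantized = load_and_quantize_model(
#     empty_model, bnb_quantization_config=bnb_config,
#     weights_location="/path/to/checkpoint", device_map="auto")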
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 | 1 |
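A hedged usage sketch of the pipeline re-exported by this __init__ (public checkpoint ids; requires torch, transformers and network access):
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet)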
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
def get_all_tweets ( screen_name ) -> None:
    '''simple docstring'''
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f'''getting tweets before {oldest}''' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f'''...{len(alltweets )} tweets downloaded so far''' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='''train''' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset | 97 | 1 |
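A hedged sketch of the public entry point this class backs, Dataset.from_generator (assuming the datasets library is installed):
from datasets import Dataset

def gen():
    for i in range(3):
        yield {"idx": i}

ds = Dataset.from_generator(gen)
print(len(ds))  # 3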
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **UpperCamelCase_ ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
UpperCamelCase__ :List[str] = kwargs.pop('''torchscript''' , self.torchscript )
UpperCamelCase__ :Union[str, Any] = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
UpperCamelCase__ :Tuple = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**UpperCamelCase_ )
_a = field(default=A__ , metadata={'help': 'Trace the models using torchscript'} )
_a = field(default=A__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
_a = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.n_gpu > 0 | 97 |
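A hedged construction sketch via the public transformers name for this class, PyTorchBenchmarkArguments (the benchmark utilities are deprecated but still importable):
from transformers import PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8])
print(args.device, args.n_gpu)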
'''simple docstring'''
MOD_ADLER = 65521
def adler32 ( plain_text ) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a | 97 | 1 |
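A quick consistency check against zlib's reference implementation; it holds for any text whose code points fit in one byte, since ord() then equals the latin-1 byte value:
import zlib
text = "Wikipedia"
assert adler32(text) == zlib.adler32(text.encode("latin-1"))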
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class lowercase ( A__ ):
"""simple docstring"""
_a = CustomTokenizer
pass | 97 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 1 |
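A hedged instantiation sketch using the public class behind this definition, CamembertConfig (assuming transformers is installed):
from transformers import CamembertConfig

config = CamembertConfig(vocab_size=30522, hidden_size=768)
print(config.model_type)  # "camembert"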
'''simple docstring'''
def gray_code_sequence ( bit_count ) -> list:
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string ( bit_count ) -> list:
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
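A worked example of the two functions as repaired above:
assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code_sequence(2) == [0, 1, 3, 2]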
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , False )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
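A hedged sketch of the default behaviour these tests pin down, resizing so the shortest edge becomes 800 while the aspect ratio is kept (requires transformers, torch and PIL):
from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor()
image = Image.new("RGB", (640, 480))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # expected torch.Size([1, 3, 800, 1066])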
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def get_pairs ( word ) -> Optional[Any]:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Optional[Any] = vocab_file
UpperCamelCase__ :Any = merges_file
UpperCamelCase__ :str = {}
UpperCamelCase__ :Tuple = 0
UpperCamelCase__ :Any = 1
UpperCamelCase__ :Optional[Any] = 2
UpperCamelCase__ :str = 3
self.add_from_file(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
UpperCamelCase__ :List[str] = merges_handle.read().split('''\n''' )[:-1]
UpperCamelCase__ :Tuple = [tuple(merge.split()[:-1] ) for merge in merges]
UpperCamelCase__ :List[Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :Tuple = {}
    def lowerCAmelCase__ ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def lowerCAmelCase__ ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def lowerCAmelCase__ ( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def lowerCAmelCase__ ( self , text ):
        '''simple docstring'''
        split_tokens = []
        words = re.findall(r'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.decoder.get(UpperCamelCase_ , self.unk_token )
    def lowerCAmelCase__ ( self , tokens ):
        '''simple docstring'''
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def lowerCAmelCase__ ( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        '''simple docstring'''
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder ) | 97 |
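A worked example of the BPE pair-extraction helper defined at the top of this snippet:
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}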
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask :
"""simple docstring"""
    def __init__( self , task_performed , total ):
        '''simple docstring'''
        self.total_tasks = total # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list ) # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until( self , mask , task_no ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
        self.dp[mask][task_no] = total_ways_util
return self.dp[mask][task_no]
    def count_no_of_ways( self , task_performed ):
        '''simple docstring'''
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
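# A brute-force cross-check (an added sketch, not part of the original snippet):
# enumerate every way to hand each person one distinct task they can do and count
# the valid assignments. For the sample input above it also yields 10.
from itertools import product
def brute_force_count(task_performed, total_tasks):
    count = 0
    for choice in product(range(1, total_tasks + 1), repeat=len(task_performed)):
        # each person gets a task from their own list, and no task is reused
        if len(set(choice)) == len(choice) and all(
            task in allowed for task, allowed in zip(choice, task_performed)
        ):
            count += 1
    return count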
'''simple docstring'''
def a ( __a , __b ) -> int:
    '''simple docstring'''
    if len(__a ) != len(__b ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for chara, charb in zip(__a , __b ):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
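# A quick usage sketch (an addition): the Hamming distance counts the positions at
# which two equal-length strings differ.
#
#     >>> a("karolin", "kathrin")
#     3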
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
def get_all_tweets( screen_name ) -> None:
    '''simple docstring'''
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
# initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f'''getting tweets before {oldest}''' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f'''...{len(alltweets )} tweets downloaded so far''' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
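# An equivalent pagination sketch (an addition, assuming the classic tweepy v3 API):
# tweepy.Cursor hides the manual max_id bookkeeping done in the loop above.
#
#     for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items(3200):
#         print(tweet.id_str, tweet.text)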
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spm_char.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class SpeechT5Tokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ):
        '''simple docstring'''
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + suffix_ones
        return ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,) | 97 |
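# A short usage sketch (an addition, assuming this file ships as the SpeechT5
# tokenizer in transformers; the checkpoint id is taken from the map above):
#
#     from transformers import SpeechT5Tokenizer
#     tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#     ids = tok("Hello world").input_ids   # character-level pieces plus </s>
#     print(tok.decode(ids))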
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key( state_dict , old , new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ):
    '''simple docstring'''
    prefix = ''''''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
def resize( image , checkpoint_url ):
    '''simple docstring'''
    width , height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if '''detection''' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ):
    '''simple docstring'''
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    logger.info('''Converting model...''' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    file_name = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=file_name )
    image = Image.open(file_path ).convert('''RGB''' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''' )
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
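# A hedged follow-up sketch (an addition): after conversion, raw logits can be
# decoded with the processor's post-processing helper; the 0.7 threshold is
# illustrative only.
#
#     results = image_processor.post_process_object_detection(
#         outputs, threshold=0.7, target_sizes=[image.size[::-1]]
#     )[0]
#     for score, label in zip(results["scores"], results["labels"]):
#         print(config.id2label[label.item()], round(score.item(), 3))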
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList( HashTable ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
    def _set_value( self , key , data ):
        '''simple docstring'''
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self ):
        '''simple docstring'''
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self , key , data=None ):
        '''simple docstring'''
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data ) | 97 |
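# A small usage sketch (an addition; the constructor arguments are assumed to
# follow the parent HashTable in this repository: a table size plus an optional
# charge factor, with insert_data as the public entry point):
#
#     ht = HashTableWithLinkedList(3, charge_factor=2)
#     for value in (10, 20, 30):
#         ht.insert_data(value)
#     print(ht.values)   # each occupied slot holds a deque of the values hashed to it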
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number ) -> bool:
    '''simple docstring'''
    sq = int(number**0.5 )
    return number == sq * sq
def add_three( x_num , x_den , y_num , y_den , z_num , z_den ) -> tuple[int, int]:
    '''simple docstring'''
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution( order = 35 ) -> int:
    '''simple docstring'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
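# A spot check (an addition): for n = 1 the triple (1/2, 1/3, 5/6) satisfies
# x + y = z, and 1/2 + 1/3 + 5/6 = 5/3, so add_three reduces the sum correctly:
#
#     assert add_three(1, 2, 1, 3, 5, 6) == (5, 3)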
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester( ConfigTester ):
    """simple docstring"""
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
class LevitModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        self.model_tester = LevitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
            for _ in range(4 ):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
loss.backward()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
                config.problem_type = problem_type['''title''']
                config.num_labels = problem_type['''num_labels''']
                model = model_class(config )
                model.to(torch_device )
                model.train()
                inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                if problem_type["num_labels"] > 1:
                    inputs['''labels'''] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
                inputs['''labels'''] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
                with warnings.catch_warnings(record=True ) as warning_list:
                    loss = model(**inputs ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) ) | 97 |
'''simple docstring'''
def solution() -> int:
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 | 1 |
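# A sanity note (an addition): the joined digits form Champernowne's constant
# 0.123456789101112..., and the product of the digits at positions 1, 10, 100,
# ..., 1000000 is the classic Project Euler 40 answer:
#
#     assert solution() == 210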
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
"""simple docstring"""
    def __init__( self , initial_learning_rate , decay_schedule_fn , warmup_steps , power = 1.0 , name = None , ):
        '''simple docstring'''
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ):
        '''simple docstring'''
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config( self ):
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer( init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.999 , adam_epsilon = 1e-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay( Adam ):
"""simple docstring"""
    def __init__( self , learning_rate = 0.001 , beta_1 = 0.9 , beta_2 = 0.999 , epsilon = 1e-7 , amsgrad = False , weight_decay_rate = 0.0 , include_in_weight_decay = None , exclude_from_weight_decay = None , name = "AdamWeightDecay" , **kwargs , ):
        '''simple docstring'''
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
    def from_config( cls , config ):
        '''simple docstring'''
        custom_objects = {'''WarmUp''': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local( self , var_device , var_dtype , apply_state ):
        '''simple docstring'''
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]['''weight_decay_rate'''] = tf.constant(
            self.weight_decay_rate , name='''adam_weight_decay_rate''' )
    def _decay_weights_op( self , var , learning_rate , apply_state ):
        '''simple docstring'''
        do_decay = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        '''simple docstring'''
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr( self , var_device , var_dtype , apply_state ):
        '''simple docstring'''
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense( self , grad , var , apply_state=None ):
        '''simple docstring'''
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        '''simple docstring'''
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config( self ):
        '''simple docstring'''
        config = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
    def _do_use_weight_decay( self , param_name ):
        '''simple docstring'''
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
return False
return True
class GradientAccumulator(object):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
        self._gradients = []
        self._accum_steps = None
@property
    def step( self ):
        '''simple docstring'''
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
    def gradients( self ):
'''simple docstring'''
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self , gradients ):
        '''simple docstring'''
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(gradients )}''' )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
self._accum_steps.assign_add(1 )
    def reset( self ):
        '''simple docstring'''
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) ) | 97 |
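# A usage sketch (an addition; the Keras model below is hypothetical):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=3e-5, num_train_steps=1000, num_warmup_steps=100,
#         weight_decay_rate=0.01,
#     )
#     model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")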
'''simple docstring'''
from PIL import Image
def change_brightness( img , level ) -> Image:
    '''simple docstring'''
    def brightness( c ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 | 1 |
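# Worked arithmetic (an added note): with level = 100 the point map reduces to
# new_pixel = c + 100, so 0 -> 100, 128 -> 228, and any input above 155 saturates
# at 255 once PIL clamps the 8-bit result.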
'''simple docstring'''
from __future__ import annotations
import math
def ucal( u , p ) -> float:
    '''simple docstring'''
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    '''simple docstring'''
    n = int(input('''enter the numbers of values: ''' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main() | 97 |
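# A worked example (an addition): for x = [0, 1, 2, 3], first y-column [1, 2, 4, 8]
# and an interpolation point of 1.5, we get u = 1.5, forward differences
# [1, 2, 4, 8] -> [1, 2, 4] -> [1, 2] -> [1], and
# summ = 1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125.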
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = AlbertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
UpperCamelCase__ :int = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = AlbertForPreTraining(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , sentence_order_label=UpperCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = AlbertForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = AlbertForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = self.num_labels
UpperCamelCase__ :Any = AlbertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Union[str, Any] = AlbertForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.num_choices
UpperCamelCase__ :Optional[int] = AlbertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ :Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ :Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ :Any = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_a = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = True
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
UpperCamelCase__ :Tuple = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class in get_values(UpperCamelCase_ ):
UpperCamelCase__ :Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ )
UpperCamelCase__ :str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = AlbertModelTester(self )
UpperCamelCase__ :int = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Any = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Optional[int] = AlbertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = AlbertModel.from_pretrained('''albert-base-v2''' )
UpperCamelCase__ :int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCamelCase__ :List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
UpperCamelCase__ :Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1e-4 ) ) | 97 |
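# A minimal usage sketch mirroring the slow integration test above; the
# checkpoint name and expected shape come from that test, and the input tensors
# are the ones it defines.
# model = AlbertModel.from_pretrained('''albert-base-v2''')
# output = model(input_ids, attention_mask=attention_mask)[0]
# output.shape -> torch.Size([1, 11, 768])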
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
    '''simple docstring'''
    __a = re.sub('''<n>''' , '''''' , __a )  # remove pegasus newline char and keep the substituted text
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = '''▁'''
__snake_case = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__snake_case = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
__snake_case = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
__snake_case = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = ['input_ids', 'attention_mask']
_a = []
_a = []
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
UpperCamelCase__ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
UpperCamelCase__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
UpperCamelCase__ :Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase__ :Dict = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :int = len(self.sp_model )
UpperCamelCase__ :Dict = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase_ )
}
UpperCamelCase__ :List[Any] = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase__ :Any = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase__ :Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase__ :Union[str, Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase__ :Any = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase__ :Optional[Any] = self.lang_code_to_id[self._src_lang]
UpperCamelCase__ :Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.__dict__.copy()
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ :Optional[int] = {}
UpperCamelCase__ :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :List[str] = [1] * len(self.prefix_tokens )
UpperCamelCase__ :int = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase_ )) + ([0] * len(UpperCamelCase_ )) + suffix_ones
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase__ :Tuple = src_lang
UpperCamelCase__ :Optional[Any] = self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.convert_tokens_to_ids(UpperCamelCase_ )
UpperCamelCase__ :Dict = tgt_lang_id
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ :Any = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ :int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
UpperCamelCase__ :Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = "en_XX" , UpperCamelCase_ = None , UpperCamelCase_ = "ro_RO" , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = src_lang
UpperCamelCase__ :Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = self.lang_code_to_id[src_lang]
UpperCamelCase__ :int = []
UpperCamelCase__ :Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.lang_code_to_id[lang]
UpperCamelCase__ :Optional[Any] = []
UpperCamelCase__ :Tuple = [self.eos_token_id, self.cur_lang_code] | 97 |
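# A hypothetical usage sketch, assuming this sample is the MBart tokenizer that
# the vocab map above points at; the sentence is illustrative.
# tok = MBartTokenizer.from_pretrained('''facebook/mbart-large-en-ro''', src_lang='''en_XX''', tgt_lang='''ro_RO''')
# batch = tok('''UN Chief Says There Is No Plan to Stop War''', return_tensors='''pt''')
# batch.input_ids ends with [..., eos_token_id, lang_code_id], matching
# set_src_lang_special_tokens above.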
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
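# A minimal usage sketch, assuming the function above; the output directory is
# illustrative and the module file name (download_wmt.py) is hypothetical.
# download_wmt_dataset(src_lang='''ro''', tgt_lang='''en''', dataset='''wmt16''', save_dir='''wmt16-ro-en''')
# shell equivalent: python download_wmt.py --src_lang ro --tgt_lang en --save_dir wmt16-ro-en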
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__snake_case = logging.get_logger(__name__)
class lowercase ( A__ ):
"""simple docstring"""
_a = ['input_features', 'attention_mask']
def __init__( self , UpperCamelCase_=80 , UpperCamelCase_=16000 , UpperCamelCase_=80 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = num_mel_bins
UpperCamelCase__ :int = do_ceptral_normalize
UpperCamelCase__ :List[str] = normalize_means
UpperCamelCase__ :Union[str, Any] = normalize_vars
UpperCamelCase__ :Union[str, Any] = True
def lowerCAmelCase__ ( self , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCamelCase__ :Optional[int] = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 )
UpperCamelCase__ :Tuple = ta_kaldi.fbank(UpperCamelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = True , UpperCamelCase_ = True , UpperCamelCase_ = 0.0 , ):
'''simple docstring'''
if normalize_means:
UpperCamelCase__ :Optional[Any] = x[:input_length].mean(axis=0 )
UpperCamelCase__ :Optional[Any] = np.subtract(UpperCamelCase_ , UpperCamelCase_ )
if normalize_vars:
UpperCamelCase__ :Union[str, Any] = x[:input_length].std(axis=0 )
UpperCamelCase__ :List[str] = np.divide(UpperCamelCase_ , UpperCamelCase_ )
if input_length < x.shape[0]:
UpperCamelCase__ :int = padding_value
# make sure array is in float32
UpperCamelCase__ :List[str] = x.astype(np.floataa )
return x
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCamelCase_ , UpperCamelCase_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCamelCase_ , UpperCamelCase_ )
]
def __call__( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase__ :Dict = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
UpperCamelCase__ :Tuple = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase__ :str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
UpperCamelCase__ :int = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase__ :Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase__ :str = [raw_speech]
# extract fbank features
UpperCamelCase__ :Tuple = [self._extract_fbank_features(UpperCamelCase_ ) for waveform in raw_speech]
# convert into correct format for padding
UpperCamelCase__ :Optional[Any] = BatchFeature({'''input_features''': features} )
UpperCamelCase__ :Union[str, Any] = self.pad(
UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
# make sure list is in array format
UpperCamelCase__ :Dict = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , UpperCamelCase_ ):
UpperCamelCase__ :str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_features]
UpperCamelCase__ :Tuple = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
UpperCamelCase__ :Optional[int] = [np.asarray(UpperCamelCase_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCamelCase__ :Any = (
np.array(UpperCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(UpperCamelCase_ , max_length=UpperCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCamelCase__ :Any = self.normalize(
padded_inputs['''input_features'''] , attention_mask=UpperCamelCase_ )
if return_tensors is not None:
UpperCamelCase__ :Any = padded_inputs.convert_to_tensors(UpperCamelCase_ )
return padded_inputs | 97 |
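# A standalone sketch of the utterance-level CMVN applied above, assuming a toy
# (frames, mel_bins) array; the shapes and tolerance are illustrative.
import numpy as np

toy_feats = np.random.randn(100, 80).astype(np.float32)  # toy fbank features
toy_cmvn = (toy_feats - toy_feats.mean(axis=0)) / toy_feats.std(axis=0)
assert abs(float(toy_cmvn.mean())) < 1e-4  # zero mean per mel bin after normalization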
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
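# Illustrative note on the lazy pattern above: _LazyModule defers the heavy
# torch/vision imports, so the line below resolves DPTConfig without importing
# the modeling code until a model or processor is actually touched.
# from transformers.models.dpt import DPTConfig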
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__snake_case = logging.get_logger(__name__)
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) | 97 |
'''simple docstring'''
def a ( __a , __b ) -> int:
    '''simple docstring'''
    if len(__a ) != len(__b ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for chara, charb in zip(__a , __b ):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
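# Quick sanity check for the Hamming-distance helper above (classic textbook
# pair, illustrative):
# a('''karolin''', '''kathrin''') == 3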
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
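# Sanity check for the function above: the first few Catalan numbers.
# catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]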
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    '''simple docstring'''
    output = {}
    output['''input_ids'''] = tokenizer(example['''content'''] , truncation=False )['''input_ids''']
    output['''ratio_char_token'''] = len(example['''content'''] ) / len(output['''input_ids'''] )
    return output
__snake_case = HfArgumentParser(PretokenizationArguments)
__snake_case = parser.parse_args()
if args.num_workers is None:
__snake_case = multiprocessing.cpu_count()
__snake_case = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__snake_case = time.time()
__snake_case = load_dataset(args.dataset_name, split='''train''')
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
__snake_case = time.time()
__snake_case = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
__snake_case = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""") | 97 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ):
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    '''simple docstring'''
    return json.load(buffer )
def load_json_lines( buffer ):
    '''simple docstring'''
    return [json.loads(line ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
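# A minimal round-trip sketch of the reader/writer under test, assuming a toy
# in-memory dataset; the file name is illustrative.
# ds = Dataset.from_dict({'''col_1''': ['''a''', '''b'''], '''col_2''': [1, 2]})
# JsonDatasetWriter(ds, '''out.jsonl''', lines=True).write()
# reloaded = JsonDatasetReader('''out.jsonl''').read()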
'''simple docstring'''
import random
def partition( a , left_index , right_index ):
    '''simple docstring'''
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random( a , left , right ):
    '''simple docstring'''
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main():
    '''simple docstring'''
    user_input = input('''Enter numbers separated by a comma:\n''' ).strip()
    arr = [int(item ) for item in user_input.split(''',''' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main() | 97 |
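# Non-interactive sanity check for the sort above; the list is illustrative.
# data = [5, 3, 8, 1, 2]
# quick_sort_random(data, 0, len(data))
# data -> [1, 2, 3, 5, 8]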
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 1 |
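# Illustrative expectation implied by the assertions above (assumed, not taken
# from the source): values are cast to int/float/bool/str, and a bare flag with
# no value (as in the failing args) makes the converter raise.
# _convert_nargs_to_dict(['''--epochs''', '''3''', '''--do_train''', '''False'''])
# -> {'''epochs''': 3, '''do_train''': False}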
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
UpperCamelCase__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase__ :int = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
UpperCamelCase__ :Union[str, Any] = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase__ :List[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.get_tokenizer()
UpperCamelCase__ :Dict = self.get_image_processor()
UpperCamelCase__ :str = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ :Union[str, Any] = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
UpperCamelCase__ :Any = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :List[Any] = self.get_tokenizer()
UpperCamelCase__ :Dict = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = self.prepare_image_inputs()
UpperCamelCase__ :List[str] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
UpperCamelCase__ :Optional[int] = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.get_image_processor()
UpperCamelCase__ :List[str] = self.get_tokenizer()
UpperCamelCase__ :List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Any = '''lower newer'''
UpperCamelCase__ :Optional[Any] = processor(text=UpperCamelCase_ )
UpperCamelCase__ :Tuple = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.get_image_processor()
UpperCamelCase__ :str = self.get_tokenizer()
UpperCamelCase__ :Tuple = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = '''lower newer'''
UpperCamelCase__ :Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ :Optional[int] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :List[str] = self.get_tokenizer()
UpperCamelCase__ :Tuple = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ :List[str] = processor.batch_decode(UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_image_processor()
UpperCamelCase__ :List[Any] = self.get_tokenizer()
UpperCamelCase__ :int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[str] = '''lower newer'''
UpperCamelCase__ :Tuple = self.prepare_image_inputs()
UpperCamelCase__ :Dict = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 97 |
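# A minimal usage sketch of the processor under test; the tokenizer and image
# processor mirror the fixtures above, and the text/image are illustrative.
# processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
# inputs = processor(text='''lower newer''', images=image, return_tensors='''pt''')
# inputs.keys() -> input_ids, token_type_ids, attention_mask, pixel_values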
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module( module ) -> None:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device() -> str:
    '''simple docstring'''
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_image( img ) -> None:
    '''simple docstring'''
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp() -> str:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp | 97 | 1 |
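# Illustrative use of the helpers above; `model` is hypothetical.
# device = get_device()
# freeze_module(model)  # stop gradients through a frozen submodule
# print(get_timestamp())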
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__snake_case = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
__snake_case = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
__snake_case = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="auto" , UpperCamelCase_=-1 , UpperCamelCase_=0.9 , UpperCamelCase_=5 , UpperCamelCase_=500 , UpperCamelCase_="gpt2-large" , UpperCamelCase_=-1 , UpperCamelCase_=1024 , UpperCamelCase_=25 , UpperCamelCase_=5 , UpperCamelCase_=True , UpperCamelCase_=25 , ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = compute_mauve(
p_text=UpperCamelCase_ , q_text=UpperCamelCase_ , p_features=UpperCamelCase_ , q_features=UpperCamelCase_ , p_tokens=UpperCamelCase_ , q_tokens=UpperCamelCase_ , num_buckets=UpperCamelCase_ , pca_max_data=UpperCamelCase_ , kmeans_explained_var=UpperCamelCase_ , kmeans_num_redo=UpperCamelCase_ , kmeans_max_iter=UpperCamelCase_ , featurize_model_name=UpperCamelCase_ , device_id=UpperCamelCase_ , max_text_length=UpperCamelCase_ , divergence_curve_discretization_size=UpperCamelCase_ , mauve_scaling_factor=UpperCamelCase_ , verbose=UpperCamelCase_ , seed=UpperCamelCase_ , )
return out | 97 |
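The metric class above delegates entirely to `compute_mauve`. As a rough illustration of the quantity it reports, here is a toy sketch of the paper's divergence curve on two discrete histograms; this is not the package's actual implementation, which also averages both curve orientations and works on quantized model features:

import numpy as np


def kl_divergence(p: np.ndarray, q: np.ndarray) -> float:
    # KL(p || q) over the support of p; the mixtures below keep q positive there.
    mask = p > 0
    return float(np.sum(p[mask] * np.log(p[mask] / q[mask])))


def toy_mauve(p: np.ndarray, q: np.ndarray, c: float = 5.0, grid: int = 25) -> float:
    xs, ys = [1.0], [0.0]  # extreme point of the curve (KL = 0 vs. KL = inf)
    for lam in np.linspace(0.0, 1.0, grid + 2)[1:-1]:  # skip degenerate endpoints
        r = lam * p + (1.0 - lam) * q  # mixture R_lambda
        xs.append(np.exp(-c * kl_divergence(q, r)))
        ys.append(np.exp(-c * kl_divergence(p, r)))
    xs.append(0.0)
    ys.append(1.0)  # other extreme point
    xs, ys = np.asarray(xs), np.asarray(ys)
    order = np.argsort(xs)
    xs, ys = xs[order], ys[order]
    # Area under the divergence curve via the trapezoidal rule.
    return float(np.sum((xs[1:] - xs[:-1]) * (ys[1:] + ys[:-1]) / 2.0))


print(toy_mauve(np.array([0.4, 0.4, 0.2]), np.array([0.2, 0.3, 0.5])))  # larger when p and q are closer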
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )} | 97 | 1 |
'''simple docstring'''
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Any = 1
UpperCamelCase__ :Optional[int] = 2
while i * i <= n:
UpperCamelCase__ :Optional[Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def a ( ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = 1
UpperCamelCase__ :Optional[Any] = 1
while True:
i += 1
t_num += i
if count_divisors(__a ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution()) | 97 |
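The loop above (Project Euler problem 12) recounts the divisors of every triangular number from scratch. A common refinement, sketched below, writes T_n = n(n+1)/2 as a product of two coprime factors, so the divisor count factorizes: d(T_n) = d(a) * d(b).

def count_divisors(n: int) -> int:
    # Divisor count via trial-division factorization.
    divisors, i = 1, 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors *= multiplicity + 1
        i += 1
    if n > 1:
        divisors *= 2
    return divisors


def first_triangle_with_divisors_over(limit: int = 500) -> int:
    n = 1
    while True:
        # Split T_n = n * (n + 1) / 2 into coprime halves.
        a, b = (n // 2, n + 1) if n % 2 == 0 else (n, (n + 1) // 2)
        if count_divisors(a) * count_divisors(b) > limit:
            return a * b
        n += 1


print(first_triangle_with_divisors_over(500))  # 76576500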
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 97 | 1 |
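A toy trace of the `build_inputs_with_special_tokens` override above, with made-up special-token ids, showing what the single-sequence and pair branches return:

BOS, EOS = 0, 2  # hypothetical ids, for illustration only


def build_inputs(ids_a, ids_b=None):
    out = [BOS] + ids_a + [EOS]
    if ids_b is None:
        return out
    return out + [EOS] + ids_b + [EOS]


print(build_inputs([5, 6]))          # [0, 5, 6, 2]
print(build_inputs([5, 6], [7, 8]))  # [0, 5, 6, 2, 2, 7, 8, 2]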
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__snake_case = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def a ( __a , __a , __a , __a , __a ) -> Dict:
'''simple docstring'''
for attribute in key.split('''.''' ):
UpperCamelCase__ :int = getattr(__a , __a )
if weight_type is not None:
UpperCamelCase__ :Tuple = getattr(__a , __a ).shape
else:
UpperCamelCase__ :Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCamelCase__ :str = value
elif weight_type == "weight_g":
UpperCamelCase__ :int = value
elif weight_type == "weight_v":
UpperCamelCase__ :List[str] = value
elif weight_type == "bias":
UpperCamelCase__ :Union[str, Any] = value
else:
UpperCamelCase__ :Dict = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def a ( __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = []
UpperCamelCase__ :str = fairseq_model.state_dict()
UpperCamelCase__ :List[Any] = hf_model.feature_extractor
UpperCamelCase__ :Dict = hf_model.adapter
for name, value in fairseq_dict.items():
UpperCamelCase__ :Tuple = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == '''group''' , )
UpperCamelCase__ :Any = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__a , __a , __a , __a )
UpperCamelCase__ :List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCamelCase__ :Tuple = True
if "*" in mapped_key:
UpperCamelCase__ :Any = name.split(__a )[0].split('''.''' )[-2]
UpperCamelCase__ :Optional[Any] = mapped_key.replace('''*''' , __a )
if "weight_g" in name:
UpperCamelCase__ :List[Any] = '''weight_g'''
elif "weight_v" in name:
UpperCamelCase__ :Optional[Any] = '''weight_v'''
elif "bias" in name:
UpperCamelCase__ :Tuple = '''bias'''
elif "weight" in name:
UpperCamelCase__ :Optional[Any] = '''weight'''
else:
UpperCamelCase__ :str = None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f'''Unused weights: {unused_weights}''' )
def a ( __a , __a , __a , __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :int = full_name.split('''conv_layers.''' )[-1]
UpperCamelCase__ :str = name.split('''.''' )
UpperCamelCase__ :int = int(items[0] )
UpperCamelCase__ :Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCamelCase__ :Any = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCamelCase__ :List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCamelCase__ :Any = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCamelCase__ :int = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__a )
def a ( __a , __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :Tuple = full_name.split('''adaptor.''' )[-1]
UpperCamelCase__ :int = name.split('''.''' )
if items[1].isdigit():
UpperCamelCase__ :List[str] = int(items[1] )
else:
UpperCamelCase__ :List[Any] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
UpperCamelCase__ :List[str] = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
UpperCamelCase__ :int = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
UpperCamelCase__ :Optional[Any] = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
UpperCamelCase__ :List[str] = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(__a , __a ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
UpperCamelCase__ :Dict = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
UpperCamelCase__ :str = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(__a )
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Any = emb.weight.shape
UpperCamelCase__ :int = nn.Linear(__a , __a , bias=__a )
UpperCamelCase__ :Dict = emb.weight.data
return lin_layer
@torch.no_grad()
def a ( __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :str = WavaVecaConfig.from_pretrained(
__a , add_adapter=__a , adapter_stride=__a , adapter_kernel_size=__a , use_auth_token=__a , output_hidden_size=__a , )
UpperCamelCase__ :Dict = MBartConfig.from_pretrained(__a )
# load model
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
UpperCamelCase__ :List[str] = model[0].eval()
# load feature extractor
UpperCamelCase__ :Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(__a , use_auth_token=__a )
# set weights for wav2vec2 encoder
UpperCamelCase__ :List[str] = WavaVecaModel(__a )
recursively_load_weights_wavaveca(model.encoder , __a )
# load decoder weights
UpperCamelCase__ :Tuple = MBartForCausalLM(__a )
UpperCamelCase__ , UpperCamelCase__ :int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__a )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
UpperCamelCase__ :str = SpeechEncoderDecoderModel(encoder=__a , decoder=__a )
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :List[Any] = MBartaaTokenizer(__a )
tokenizer.save_pretrained(__a )
UpperCamelCase__ :Dict = hf_wavavec.config.to_dict()
UpperCamelCase__ :Optional[int] = tokenizer.pad_token_id
UpperCamelCase__ :Tuple = tokenizer.bos_token_id
UpperCamelCase__ :Union[str, Any] = tokenizer.eos_token_id
UpperCamelCase__ :Any = '''mbart50'''
UpperCamelCase__ :Optional[Any] = '''wav2vec2'''
UpperCamelCase__ :List[Any] = tokenizer.eos_token_id
UpperCamelCase__ :Optional[int] = 250004
UpperCamelCase__ :Tuple = tokenizer.eos_token_id
UpperCamelCase__ :int = SpeechEncoderDecoderConfig.from_dict(__a )
hf_wavavec.save_pretrained(__a )
feature_extractor.save_pretrained(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
    parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250004, type=int, help='''`decoder_start_token_id` of model config''')
__snake_case = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
) | 97 |
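The `make_linear_from_emb` helper in the script above is the usual weight-tying trick: reuse the decoder's embedding matrix as the LM head. A standalone sketch with the Linear dimensions made explicit (the obfuscated call leaves them ambiguous):

import torch
from torch import nn


def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # tie the head to the embedding
    return lin_layer


emb = nn.Embedding(10, 4)
head = make_linear_from_emb(emb)
print(head(torch.randn(2, 4)).shape)  # torch.Size([2, 10])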
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def a ( __a ) -> list[list[float]]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(__a ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
UpperCamelCase__ :Optional[int] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
UpperCamelCase__ :List[Any] = [[0.0, 0.0], [0.0, 0.0]]
UpperCamelCase__ , UpperCamelCase__ :int = matrix[1][1], matrix[0][0]
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(__a ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(__a ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
UpperCamelCase__ :Tuple = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
UpperCamelCase__ :Any = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
UpperCamelCase__ :int = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
UpperCamelCase__ :Union[str, Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
UpperCamelCase__ :Tuple = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
UpperCamelCase__ :Any = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
UpperCamelCase__ :Dict = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
UpperCamelCase__ :Tuple = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
UpperCamelCase__ :List[Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
UpperCamelCase__ :str = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
UpperCamelCase__ :Tuple = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
UpperCamelCase__ :Optional[int] = array(__a )
for i in range(3 ):
for j in range(3 ):
UpperCamelCase__ :Optional[int] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
UpperCamelCase__ :str = array(__a )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(__a )
# Calculate the inverse of the matrix
return [[float(d(__a ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' ) | 97 |
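A quick sanity check for a hand-rolled inverse like the one above: multiplying back should recover the identity up to floating-point error. A sketch, with NumPy's `inv` standing in for the obfuscated function:

import numpy as np


def is_inverse(matrix, inverse) -> bool:
    product = np.array(matrix) @ np.array(inverse)
    return bool(np.allclose(product, np.eye(len(matrix))))


m = [[4.0, 7.0], [2.0, 6.0]]
print(is_inverse(m, np.linalg.inv(m)))  # True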
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset | 97 | 1 |
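The input stream above wraps the packaged `generator` builder from `datasets`; in user code the same path is normally reached through `Dataset.from_generator` (available in recent `datasets` releases). Minimal usage:

from datasets import Dataset


def gen():
    for i in range(3):
        yield {"idx": i, "text": f"example {i}"}


ds = Dataset.from_generator(gen)
print(ds[0])  # {'idx': 0, 'text': 'example 0'}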
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline | 97 |
'''simple docstring'''
__snake_case = 65521
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
return (b << 16) | a | 97 | 1 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 |
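The DP above fills the convolution recurrence; the closed form C_n = binom(2n, n) / (n + 1) produces the same sequence and makes a handy cross-check:

from math import comb


def catalan(n: int) -> int:
    return comb(2 * n, n) // (n + 1)  # exact: (n + 1) always divides binom(2n, n)


print([catalan(i) for i in range(8)])  # [1, 1, 2, 5, 14, 42, 132, 429]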
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 1 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 97 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
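The `get_expected_values` helper in the tester above re-implements the processor's aspect-preserving resize. The rule in isolation, as a sketch of the shortest-edge convention (ignoring the longest-edge cap):

def shortest_edge_resize(h: int, w: int, shortest_edge: int = 18) -> tuple:
    # Scale so the shorter side lands on `shortest_edge`, preserving aspect ratio.
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


print(shortest_edge_resize(400, 300))  # (24, 18)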
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a ( __a ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :List[Any] = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__a , __a )
def a ( __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :int = emb.weight.shape
UpperCamelCase__ :Tuple = nn.Linear(__a , __a , bias=__a )
UpperCamelCase__ :Dict = emb.weight.data
return lin_layer
def a ( __a ) -> str:
'''simple docstring'''
UpperCamelCase__ :int = torch.load(__a , map_location='''cpu''' )
UpperCamelCase__ :Optional[int] = Namespace(**checkpoint['''cfg''']['''model'''] )
UpperCamelCase__ :Dict = checkpoint['''model''']
remove_ignore_keys_(__a )
UpperCamelCase__ :Optional[int] = state_dict['''decoder.embed_tokens.weight'''].shape[0]
UpperCamelCase__ :Optional[int] = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
UpperCamelCase__ :Union[str, Any] = XGLMConfig(
vocab_size=__a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
UpperCamelCase__ :Any = XGLMForCausalLM(__a )
UpperCamelCase__ :Dict = model.load_state_dict(__a , strict=__a )
print(__a )
UpperCamelCase__ :Optional[int] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__snake_case = parser.parse_args()
__snake_case = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path) | 97 |
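Most of the conversion above is a mechanical state-dict rename; the `decoder` to `model` remapping on a toy dict looks like this:

state_dict = {
    "decoder.embed_tokens.weight": "tensor...",
    "decoder.layers.0.self_attn.k_proj.weight": "tensor...",
}
renamed = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
print(sorted(renamed))
# ['model.embed_tokens.weight', 'model.layers.0.self_attn.k_proj.weight']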
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
        # if not everyone gets a task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task to anyone in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
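The count for the instance above can be verified by brute force: take each person's allowed tasks and count the assignments in which every chosen task is distinct. Both approaches give 10 here.

from itertools import product

task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
count = sum(
    1
    for choice in product(*task_performed)
    if len(set(choice)) == len(choice)  # all persons got distinct tasks
)
print(count)  # 10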
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__snake_case = logging.get_logger(__name__)
def a ( __a , __a , __a , __a=False ) -> Dict:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
UpperCamelCase__ :Optional[Any] = os.path.abspath(__a )
logger.info(f'''Loading PyTorch weights from {pt_path}''' )
UpperCamelCase__ :Tuple = torch.load(__a , map_location='''cpu''' )
logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
UpperCamelCase__ :List[Any] = convert_pytorch_state_dict_to_flax(__a , __a )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ :Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(__a , __a )
return flax_state_dict
def a ( __a , __a , __a , __a , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(__a ) -> bool:
return len(set(__a ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ :Dict = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__a ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ :Dict = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__a ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ :Optional[Any] = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__a ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ :Optional[int] = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__a ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ :Tuple = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__a ):
UpperCamelCase__ :int = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ :Tuple = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__a ):
UpperCamelCase__ :str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ :List[str] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ :List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ :Tuple = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ :Tuple = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ :Union[str, Any] = pt_tuple_key[-2] + '''_v'''
if name is not None:
UpperCamelCase__ :Tuple = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def a ( __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ :List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ :Optional[Any] = flax_model.params['''params''']
else:
UpperCamelCase__ :Optional[Any] = flax_model.params
UpperCamelCase__ :Union[str, Any] = flatten_dict(__a )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ :List[str] = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__a )
UpperCamelCase__ :Any = {}
UpperCamelCase__ :Optional[int] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ :Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ :int = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCamelCase__ :Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ :List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ :int = rename_key_and_reshape_tensor(
__a , __a , __a , __a )
# add model prefix if necessary
UpperCamelCase__ :int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ :Optional[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ :Union[str, Any] = jnp.asarray(__a )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__a , __a )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ :Tuple = jnp.asarray(__a )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ :Union[str, Any] = jnp.asarray(__a )
return unflatten_dict(__a )
def a ( __a , __a ) -> List[str]:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ :int = {}
for shard_file in shard_filenames:
        # load each shard with torch and convert its tensors to numpy below
UpperCamelCase__ :List[Any] = torch.load(__a )
UpperCamelCase__ :List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ :List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ :int = flax_model.params['''params''']
UpperCamelCase__ :Union[str, Any] = flatten_dict(__a )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
UpperCamelCase__ :Dict = flax_model.params
UpperCamelCase__ :int = flatten_dict(__a )
UpperCamelCase__ :List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ :int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ :str = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCamelCase__ :Dict = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ :Dict = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = rename_key_and_reshape_tensor(
__a , __a , __a , __a )
# add model prefix if necessary
UpperCamelCase__ :Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ :List[str] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ :List[Any] = jnp.asarray(__a )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ :List[Any] = jnp.asarray(__a )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__a , __a )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ :str = jnp.asarray(__a )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ :List[Any] = jnp.asarray(__a )
return unflatten_dict(__a )
def a ( __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :Tuple = os.path.abspath(__a )
logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
UpperCamelCase__ :str = getattr(__a , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__a , '''rb''' ) as state_f:
try:
UpperCamelCase__ :Tuple = from_bytes(__a , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__a , __a )
def a ( __a , __a ) -> int:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    UpperCamelCase__ :List[Any] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , __a ) ).values()
if any(__a ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PyTorch yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
UpperCamelCase__ :str = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a )
UpperCamelCase__ :Optional[Any] = flatten_dict(__a )
UpperCamelCase__ :str = pt_model.state_dict()
UpperCamelCase__ :List[Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ :Optional[int] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ :Optional[Any] = []
UpperCamelCase__ :str = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ :Tuple = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ :int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ :str = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ :List[str] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__a ) not in pt_model_dict:
# conv layer
UpperCamelCase__ :Any = flax_key_tuple[:-1] + ('''weight''',)
UpperCamelCase__ :Tuple = jnp.transpose(__a , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__a ) not in pt_model_dict:
# linear layer
UpperCamelCase__ :Dict = flax_key_tuple[:-1] + ('''weight''',)
UpperCamelCase__ :Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ :Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ :Optional[Any] = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ :List[Any] = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
UpperCamelCase__ :Optional[int] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ :Any = '''.'''.join(__a )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ :Dict = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ :Dict = key.split('''.''' )
UpperCamelCase__ :Union[str, Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ :Union[str, Any] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ :List[Any] = key_components[-2] + '''_v'''
if name is not None:
UpperCamelCase__ :Dict = key_components[:-3] + [name]
UpperCamelCase__ :Optional[int] = '''.'''.join(__a )
UpperCamelCase__ :Any = key
if flax_key in special_pt_names:
UpperCamelCase__ :Tuple = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCamelCase__ :Dict = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor
UpperCamelCase__ :Optional[int] = torch.from_numpy(__a )
# remove from missing keys
missing_keys.remove(__a )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__a )
pt_model.load_state_dict(__a )
# re-transform missing_keys to list
UpperCamelCase__ :List[str] = list(__a )
if len(__a ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__a ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model | 97 |
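The heart of the PyTorch-to-Flax conversion above is a handful of name/shape rules; a simplified, self-contained sketch (it omits the lookups into the target Flax state dict that the full function performs):

import numpy as np

def rename_and_reshape(pt_key: tuple, pt_tensor: np.ndarray):
    # Minimal subset of the PyTorch -> Flax renaming rules applied above.
    if pt_key[-1] == "weight" and pt_tensor.ndim == 4:
        # conv kernel: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
        return pt_key[:-1] + ("kernel",), pt_tensor.transpose(2, 3, 1, 0)
    if pt_key[-1] == "weight" and pt_tensor.ndim == 2:
        # linear kernel: PT (out, in) -> Flax (in, out)
        return pt_key[:-1] + ("kernel",), pt_tensor.T
    if pt_key[-1] == "gamma":  # old layer-norm weight naming
        return pt_key[:-1] + ("scale",), pt_tensor
    if pt_key[-1] == "beta":  # old layer-norm bias naming
        return pt_key[:-1] + ("bias",), pt_tensor
    return pt_key, pt_tensor

key, kernel = rename_and_reshape(("dense", "weight"), np.zeros((8, 4)))
assert key == ("dense", "kernel") and kernel.shape == (4, 8)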
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
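For reference, tweepy's Cursor helper performs the same max_id bookkeeping automatically; a hedged sketch assuming the classic v1.1 API object used above:

import csv

import tweepy

def dump_timeline(api: tweepy.API, screen_name: str) -> None:
    # Cursor paginates the timeline, replacing the manual max_id loop
    rows = [
        [t.id_str, t.created_at, t.text]
        for t in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]
    with open(f"new_{screen_name}_tweets.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(rows)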
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'trajectory_transformer'
_a = ['past_key_values']
_a = {
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , UpperCamelCase_=100 , UpperCamelCase_=5 , UpperCamelCase_=1 , UpperCamelCase_=1 , UpperCamelCase_=249 , UpperCamelCase_=6 , UpperCamelCase_=17 , UpperCamelCase_=25 , UpperCamelCase_=4 , UpperCamelCase_=4 , UpperCamelCase_=128 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0006 , UpperCamelCase_=512 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=50256 , UpperCamelCase_=50256 , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :str = action_weight
UpperCamelCase__ :Dict = reward_weight
UpperCamelCase__ :Optional[int] = value_weight
UpperCamelCase__ :List[Any] = max_position_embeddings
UpperCamelCase__ :int = block_size
UpperCamelCase__ :Optional[Any] = action_dim
UpperCamelCase__ :Union[str, Any] = observation_dim
UpperCamelCase__ :int = transition_dim
UpperCamelCase__ :int = learning_rate
UpperCamelCase__ :int = n_layer
UpperCamelCase__ :int = n_head
UpperCamelCase__ :List[Any] = n_embd
UpperCamelCase__ :Optional[int] = embd_pdrop
UpperCamelCase__ :Dict = attn_pdrop
UpperCamelCase__ :List[str] = resid_pdrop
UpperCamelCase__ :Optional[Any] = initializer_range
UpperCamelCase__ :Any = layer_norm_eps
UpperCamelCase__ :Optional[int] = kaiming_initializer_range
UpperCamelCase__ :Dict = use_cache
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) | 97 |
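A small usage sketch of the config above: the attribute_map makes the canonical names aliases for the GPT-style ones, and the config round-trips through JSON (assumes TrajectoryTransformerConfig is importable from transformers):

from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
assert config.hidden_size == 64          # attribute_map: hidden_size -> n_embd
config.save_pretrained("./tmp_cfg")      # writes ./tmp_cfg/config.json
reloaded = TrajectoryTransformerConfig.from_pretrained("./tmp_cfg")
assert reloaded.num_hidden_layers == 2   # attribute_map: num_hidden_layers -> n_layer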
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
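The read_in_q_k_v logic above relies on PyTorch's nn.MultiheadAttention packing the query/key/value projections into a single (3*d, d) matrix; a self-contained check of that slicing convention:

import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)  # rows [0:d] -> q, [d:2d] -> k, [2d:3d] -> v
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)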
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'layoutlmv3'
def __init__( self , UpperCamelCase_=50265 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-5 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=1024 , UpperCamelCase_=128 , UpperCamelCase_=128 , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=128 , UpperCamelCase_=64 , UpperCamelCase_=256 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=224 , UpperCamelCase_=3 , UpperCamelCase_=16 , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase_ , hidden_size=UpperCamelCase_ , num_hidden_layers=UpperCamelCase_ , num_attention_heads=UpperCamelCase_ , intermediate_size=UpperCamelCase_ , hidden_act=UpperCamelCase_ , hidden_dropout_prob=UpperCamelCase_ , attention_probs_dropout_prob=UpperCamelCase_ , max_position_embeddings=UpperCamelCase_ , type_vocab_size=UpperCamelCase_ , initializer_range=UpperCamelCase_ , layer_norm_eps=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Tuple = max_ad_position_embeddings
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Union[str, Any] = shape_size
UpperCamelCase__ :int = has_relative_attention_bias
UpperCamelCase__ :Any = rel_pos_bins
UpperCamelCase__ :Tuple = max_rel_pos
UpperCamelCase__ :Optional[Any] = has_spatial_attention_bias
UpperCamelCase__ :Union[str, Any] = rel_ad_pos_bins
UpperCamelCase__ :Union[str, Any] = max_rel_ad_pos
UpperCamelCase__ :Dict = text_embed
UpperCamelCase__ :Optional[Any] = visual_embed
UpperCamelCase__ :Dict = input_size
UpperCamelCase__ :List[Any] = num_channels
UpperCamelCase__ :Optional[int] = patch_size
UpperCamelCase__ :str = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
_a = version.parse('1.12' )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return 1e-5
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return 12
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = 3 , UpperCamelCase_ = 40 , UpperCamelCase_ = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ :Optional[int] = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ :Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ :Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ :str = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ :Optional[Any] = self._generate_dummy_images(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Any = dict(
processor(
UpperCamelCase_ , text=UpperCamelCase_ , boxes=UpperCamelCase_ , return_tensors=UpperCamelCase_ , ) )
return inputs | 97 |
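LayoutLM-family models expect word boxes on a 0-1000 grid, which is why the 2-D position table above has 1024 slots; a common normalization helper, shown as an illustration rather than code from this file:

def normalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]

assert normalize_box([48, 84, 73, 128], 200, 200) == [240, 420, 365, 640]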
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
                    # n=-2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 1 |
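The gcd reduction inside add_three is exactly what fractions.Fraction provides; a quick check of the n=1 case with exact arithmetic:

from fractions import Fraction

x, y = Fraction(1, 2), Fraction(1, 3)
z = x + y  # the n=1 case: (x_num*y_den + x_den*y_num) / (x_den*y_den), reduced
assert (z.numerator, z.denominator) == (5, 6)
total = x + y + z  # what add_three reduces to a (top, bottom) pair
assert (total.numerator, total.denominator) == (5, 3)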
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def a ( __a , __a , __a , __a , __a , __a ) -> np.ndarray:
'''simple docstring'''
if (ksize % 2) == 0:
UpperCamelCase__ :List[str] = ksize + 1
UpperCamelCase__ :List[str] = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__a ):
for x in range(__a ):
# distance from center
UpperCamelCase__ :List[str] = x - ksize // 2
UpperCamelCase__ :Union[str, Any] = y - ksize // 2
# degree to radiant
UpperCamelCase__ :Optional[Any] = theta / 180 * np.pi
UpperCamelCase__ :int = np.cos(_theta )
UpperCamelCase__ :Optional[int] = np.sin(_theta )
# get kernel x
UpperCamelCase__ :Optional[int] = cos_theta * px + sin_theta * py
# get kernel y
UpperCamelCase__ :Any = -sin_theta * px + cos_theta * py
# fill kernel
UpperCamelCase__ :Dict = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__snake_case = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
__snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__snake_case = out / out.max() * 255
__snake_case = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0) | 97 |
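The kernel above evaluates a rotated Gaussian times a cosine carrier; a scalar form of the same expression, useful for spot-checking single offsets (parameter names mirror the function above):

import numpy as np

def gabor_value(px, py, sigma, theta, lambd, gamma, psi):
    _theta = theta / 180 * np.pi  # degrees -> radians, as above
    _x = np.cos(_theta) * px + np.sin(_theta) * py   # rotate into the filter frame
    _y = -np.sin(_theta) * px + np.cos(_theta) * py
    return np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
        2 * np.pi * _x / lambd + psi
    )

# At the kernel centre the Gaussian is 1 and the carrier is cos(psi) = 1:
assert np.isclose(gabor_value(0, 0, sigma=8, theta=30, lambd=10, gamma=0, psi=0), 1.0)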
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    UpperCamelCase__ :Union[str, Any] = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 | 1 |
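An alternative that avoids materialising the million-character string, using an infinite digit stream (illustrative, not the original approach):

from itertools import count, islice

def champernowne_digit(n: int) -> int:
    """1-indexed n-th digit of the Champernowne constant 0.123456789101112..."""
    digits = (d for i in count(1) for d in str(i))
    return int(next(islice(digits, n - 1, None)))

assert champernowne_digit(12) == 1  # ...9 10 11 -> the 12th digit is '1'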
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'mctct'
def __init__( self , UpperCamelCase_=8065 , UpperCamelCase_=1536 , UpperCamelCase_=36 , UpperCamelCase_=6144 , UpperCamelCase_=4 , UpperCamelCase_=384 , UpperCamelCase_=920 , UpperCamelCase_=1e-5 , UpperCamelCase_=0.3 , UpperCamelCase_="relu" , UpperCamelCase_=0.02 , UpperCamelCase_=0.3 , UpperCamelCase_=0.3 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=0.3 , UpperCamelCase_=1 , UpperCamelCase_=(7,) , UpperCamelCase_=(3,) , UpperCamelCase_=80 , UpperCamelCase_=1 , UpperCamelCase_=None , UpperCamelCase_="sum" , UpperCamelCase_=False , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
UpperCamelCase__ :Dict = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Tuple = num_hidden_layers
UpperCamelCase__ :Optional[Any] = intermediate_size
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[Any] = attention_head_dim
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = layer_norm_eps
UpperCamelCase__ :Optional[int] = layerdrop
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Any = initializer_range
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase__ :Optional[Any] = pad_token_id
UpperCamelCase__ :Union[str, Any] = bos_token_id
UpperCamelCase__ :Tuple = eos_token_id
UpperCamelCase__ :str = conv_glu_dim
UpperCamelCase__ :int = conv_dropout
UpperCamelCase__ :int = num_conv_layers
UpperCamelCase__ :Union[str, Any] = input_feat_per_channel
UpperCamelCase__ :Any = input_channels
UpperCamelCase__ :str = conv_channels
UpperCamelCase__ :Union[str, Any] = ctc_loss_reduction
UpperCamelCase__ :List[str] = ctc_zero_infinity
# prevents config testing fail with exporting to json
UpperCamelCase__ :Tuple = list(UpperCamelCase_ )
UpperCamelCase__ :int = list(UpperCamelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) | 97 |
'''simple docstring'''
from PIL import Image
def a ( __a , __a ) -> Image:
'''simple docstring'''
def brightness(__a ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__a )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 | 1 |
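Note that 128 + level + (c - 128) simplifies to c + level; Image.point evaluates the callable once per possible 8-bit value to build a lookup table. A tiny sanity check:

from PIL import Image

img = Image.new("L", (1, 1), color=100)
out = img.point(lambda c: c + 100)  # equivalent to the brightness closure with level=100
assert out.getpixel((0, 0)) == 200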
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
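A toy equivalent of the lazy-import pattern above: PEP 562's module-level __getattr__ defers the real import until an attribute is first accessed (illustrative sketch, not the transformers implementation):

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    # called only when `name` is not found normally, e.g. `from this_module import sqrt`
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")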
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
        UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = '''laion/clap-htsat-unfused'''
UpperCamelCase__ :Optional[int] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.get_tokenizer()
UpperCamelCase__ :Tuple = self.get_feature_extractor()
UpperCamelCase__ :Union[str, Any] = ClapProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Optional[int] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ :Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
UpperCamelCase__ :int = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.get_feature_extractor()
UpperCamelCase__ :int = self.get_tokenizer()
UpperCamelCase__ :List[Any] = ClapProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
UpperCamelCase__ :Any = floats_list((3, 1000) )
UpperCamelCase__ :Union[str, Any] = feature_extractor(UpperCamelCase_ , return_tensors='''np''' )
UpperCamelCase__ :Dict = processor(audios=UpperCamelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.get_feature_extractor()
UpperCamelCase__ :Optional[int] = self.get_tokenizer()
UpperCamelCase__ :Tuple = ClapProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = '''This is a test string'''
UpperCamelCase__ :int = processor(text=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_feature_extractor()
UpperCamelCase__ :List[str] = self.get_tokenizer()
UpperCamelCase__ :int = ClapProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
UpperCamelCase__ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ :str = processor.batch_decode(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_feature_extractor()
UpperCamelCase__ :List[str] = self.get_tokenizer()
UpperCamelCase__ :Tuple = ClapProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) | 97 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
    __a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char (assign the result; re.sub is not in-place)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 1 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=sys.maxsize ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = '''bilinear'''
UpperCamelCase__ :Any = max_size
UpperCamelCase__ :str = short_edge_length
def __call__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Dict = []
for img in imgs:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCamelCase__ :int = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCamelCase__ :Optional[int] = size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
UpperCamelCase__ , UpperCamelCase__ :Any = size, scale * w
else:
UpperCamelCase__ , UpperCamelCase__ :Tuple = scale * h, size
if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size:
UpperCamelCase__ :Union[str, Any] = self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Any = newh * scale
UpperCamelCase__ :str = neww * scale
UpperCamelCase__ :Union[str, Any] = int(neww + 0.5 )
UpperCamelCase__ :Any = int(newh + 0.5 )
if img.dtype == np.uinta:
UpperCamelCase__ :str = Image.fromarray(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCamelCase__ :List[str] = np.asarray(UpperCamelCase_ )
else:
            UpperCamelCase__ :Optional[int] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
UpperCamelCase__ :str = nn.functional.interpolate(
UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 )
img_augs.append(UpperCamelCase_ )
return img_augs
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :str = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCamelCase__ :Optional[int] = cfg.INPUT.FORMAT
UpperCamelCase__ :Optional[Any] = cfg.SIZE_DIVISIBILITY
UpperCamelCase__ :Tuple = cfg.PAD_VALUE
UpperCamelCase__ :Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
UpperCamelCase__ :List[str] = cfg.MODEL.DEVICE
UpperCamelCase__ :str = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCamelCase__ :List[str] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        UpperCamelCase__ :List[Any] = lambda x : (x - self.pixel_mean) / self.pixel_std
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
        UpperCamelCase__ :str = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
UpperCamelCase__ :Optional[Any] = [im.shape[-2:] for im in images]
UpperCamelCase__ :Dict = [
nn.functional.pad(
UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
def __call__( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Union[str, Any] = [images]
if single_image:
assert len(UpperCamelCase_ ) == 1
for i in range(len(UpperCamelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCamelCase__ :Tuple = torch.tensor([im.shape[:2] for im in images] )
UpperCamelCase__ :Optional[Any] = self.aug(UpperCamelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
            UpperCamelCase__ :Tuple = [self.normalizer(x ) for x in images]
# now pad them to do the following operations
UpperCamelCase__ , UpperCamelCase__ :Dict = self.pad(UpperCamelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCamelCase__ :Union[str, Any] = torch.true_divide(UpperCamelCase_ , UpperCamelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def a ( boxes , scale_yx ) -> List[Any]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def a ( tensor , box_size ) -> str:
    '''simple docstring'''
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h ) | 97 |
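The resize above keeps the aspect ratio: the short edge is scaled to a sampled size, and if the long edge then exceeds max_size, both sides are rescaled down before rounding. A minimal self-contained sketch of that arithmetic (the function name is illustrative, not from the source):

def short_edge_resize(h: int, w: int, size: int, max_size: int) -> tuple:
    # scale so the shorter side becomes `size`
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # if the longer side overshoots, rescale both dimensions to fit max_size
    if max(newh, neww) > max_size:
        cap = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * cap, neww * cap
    return int(newh + 0.5), int(neww + 0.5)

assert short_edge_resize(480, 640, 600, 1000) == (600, 800)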
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
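The converter above writes summary-style parallel files: {split}.source and {split}.target hold one sentence per line, aligned by line number. A hedged sketch of the same layout using a toy in-memory split instead of datasets.load_dataset:

from pathlib import Path

toy_split = [{"translation": {"ro": "Buna ziua", "en": "Good day"}}]
save_dir = Path("wmt16-ro-en")
save_dir.mkdir(exist_ok=True)
with open(save_dir / "train.source", "w") as src_fp, open(save_dir / "train.target", "w") as tgt_fp:
    for x in toy_split:
        ex = x["translation"]
        src_fp.write(ex["ro"] + "\n")  # one source sentence per line
        tgt_fp.write(ex["en"] + "\n")  # aligned target sentence on the same line number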
'''simple docstring'''
from ... import PretrainedConfig
__snake_case = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class lowercase ( A__ ):
"""simple docstring"""
_a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
_a = 'nezha'
def __init__( self , UpperCamelCase_=21128 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=64 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=0.1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = vocab_size
UpperCamelCase__ :Dict = hidden_size
UpperCamelCase__ :str = num_hidden_layers
UpperCamelCase__ :Any = num_attention_heads
UpperCamelCase__ :List[Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :int = max_relative_position
UpperCamelCase__ :Optional[int] = type_vocab_size
UpperCamelCase__ :List[Any] = initializer_range
UpperCamelCase__ :Any = layer_norm_eps
UpperCamelCase__ :Dict = classifier_dropout
UpperCamelCase__ :Optional[int] = use_cache | 97 |
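Configuration classes of this kind are keyword-to-attribute containers that serialize to JSON. A minimal sketch of the pattern with a hypothetical TinyConfig (not the transformers implementation):

import json

class TinyConfig:
    def __init__(self, vocab_size=21128, hidden_size=768):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    def to_json_string(self) -> str:
        # every constructor argument round-trips through the instance dict
        return json.dumps(self.__dict__, indent=2)

print(TinyConfig(hidden_size=1024).to_json_string())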
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 1 |
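_LazyModule defers the heavy torch/vision imports above until a name is first accessed. A sketch of the same idea using PEP 562 module-level __getattr__ (illustrative only; the real class also handles submodules and TYPE_CHECKING):

import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):
    # invoked only when `name` is missing from the module namespace
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")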
'''simple docstring'''
import os
def a ( __a = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__a ) , __a ) ) as input_file:
UpperCamelCase__ :str = [
[int(__a ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
UpperCamelCase__ :Any = len(__a )
UpperCamelCase__ :Optional[Any] = len(matrix[0] )
UpperCamelCase__ :Any = [[-1 for _ in range(__a )] for _ in range(__a )]
for i in range(__a ):
UpperCamelCase__ :List[Any] = matrix[i][0]
for j in range(1 , __a ):
for i in range(__a ):
UpperCamelCase__ :List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __a ):
UpperCamelCase__ :List[str] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
UpperCamelCase__ :List[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 |
'''simple docstring'''
def a ( string1 , string2 ) -> int:
    '''simple docstring'''
    if len(string1 ) != len(string2 ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 1 |
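For reference, the same distance in one line, with the classic example pair (karolin/kathrin differ at three positions):

def hamming_distance(a: str, b: str) -> int:
    if len(a) != len(b):
        raise ValueError("String lengths must match!")
    return sum(ca != cb for ca, cb in zip(a, b))

assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("0000", "1111") == 4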
'''simple docstring'''
import math
import sys
def a ( number ) -> int:
    '''simple docstring'''
    if number != int(number ):
        raise ValueError('''the value of input must be a natural number''' )
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer , answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
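A compact cross-check of the DP above; by Lagrange's four-square theorem the answer never exceeds 4 (function name illustrative):

import math

def min_squares(number: int) -> int:
    answers = [0] * (number + 1)
    for i in range(1, number + 1):
        # best over all possible last squares j*j <= i
        answers[i] = min(1 + answers[i - j * j] for j in range(1, int(math.sqrt(i)) + 1))
    return answers[number]

assert min_squares(12) == 3  # 4 + 4 + 4
assert min_squares(13) == 2  # 9 + 4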
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 1 |
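Cross-check: the recurrence agrees with the closed form C(n) = binom(2n, n) / (n + 1):

from math import comb

closed_form = [comb(2 * n, n) // (n + 1) for n in range(7)]
assert closed_form == [1, 1, 2, 5, 14, 42, 132]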
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = inspect.getfile(accelerate.test_utils )
UpperCamelCase__ :Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
UpperCamelCase__ :Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
UpperCamelCase__ :List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def lowerCAmelCase__ ( self ):
'''simple docstring'''
print(F'''Found {torch.cuda.device_count()} devices.''' )
UpperCamelCase__ :List[str] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase__ ( self ):
'''simple docstring'''
print(F'''Found {torch.cuda.device_count()} devices.''' )
UpperCamelCase__ :Tuple = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase__ ( self ):
'''simple docstring'''
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
UpperCamelCase__ :Tuple = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = Accelerator()
__snake_case = (accelerator.state.process_index + 2, 10)
__snake_case = torch.randint(0, 10, shape).to(accelerator.device)
__snake_case = ''''''
__snake_case = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__snake_case = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__snake_case = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 97 |
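What pad_across_processes is being checked for above: ragged first dimensions are zero-padded to a common length, at the end by default or at the front with pad_first=True. A single-process sketch of that semantics (helper name illustrative):

import torch

def pad_to(t: torch.Tensor, length: int, pad_first: bool = False) -> torch.Tensor:
    pad = torch.zeros(length - t.shape[0], *t.shape[1:], dtype=t.dtype)
    return torch.cat([pad, t]) if pad_first else torch.cat([t, pad])

t = torch.arange(1, 4)  # shape (3,)
assert pad_to(t, 5).tolist() == [1, 2, 3, 0, 0]
assert pad_to(t, 5, pad_first=True).tolist() == [0, 0, 1, 2, 3]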
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
    return [json.loads(line ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
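The lines=True path the tests exercise is plain JSON Lines; a stdlib-only round trip shows the format:

import io
import json

rows = [{"col_1": "a", "col_2": 1}, {"col_1": "b", "col_2": 2}]
buffer = io.StringIO()
for row in rows:
    buffer.write(json.dumps(row) + "\n")  # one JSON object per line
buffer.seek(0)
assert [json.loads(line) for line in buffer] == rows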
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__snake_case = 16
__snake_case = 32
def a ( __a , __a = 16 ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCamelCase__ :Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase__ :Optional[int] = datasets.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase__ :List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase__ :Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase__ :Tuple = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase__ :int = 8
else:
UpperCamelCase__ :Dict = None
return tokenizer.pad(
__a , padding='''longest''' , max_length=__a , pad_to_multiple_of=__a , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCamelCase__ :List[str] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
UpperCamelCase__ :Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__snake_case = mocked_dataloaders # noqa: F811
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __a ) == "1":
UpperCamelCase__ :List[str] = 2
# New Code #
UpperCamelCase__ :Dict = int(args.gradient_accumulation_steps )
UpperCamelCase__ :str = int(args.local_sgd_steps )
# Initialize accelerator
UpperCamelCase__ :List[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__a )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Tuple = config['''lr''']
UpperCamelCase__ :str = int(config['''num_epochs'''] )
UpperCamelCase__ :int = int(config['''seed'''] )
UpperCamelCase__ :Optional[Any] = int(config['''batch_size'''] )
UpperCamelCase__ :List[str] = evaluate.load('''glue''' , '''mrpc''' )
set_seed(__a )
UpperCamelCase__ , UpperCamelCase__ :str = get_dataloaders(__a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ :List[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase__ :Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase__ :Dict = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
UpperCamelCase__ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=100 , num_training_steps=(len(__a ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = accelerator.prepare(
__a , __a , __a , __a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
with LocalSGD(
accelerator=__a , model=__a , local_sgd_steps=__a , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__a ):
UpperCamelCase__ :Dict = model(**__a )
UpperCamelCase__ :Optional[Any] = output.loss
accelerator.backward(__a )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ :int = model(**__a )
UpperCamelCase__ :List[str] = outputs.logits.argmax(dim=-1 )
UpperCamelCase__ , UpperCamelCase__ :List[str] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__a , references=__a , )
UpperCamelCase__ :Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __a )
def a ( ) -> Any:
'''simple docstring'''
UpperCamelCase__ :int = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__a , default=__a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__a , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=__a , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCamelCase__ :List[Any] = parser.parse_args()
UpperCamelCase__ :Optional[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main() | 97 |
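The LocalSGD wrapper amounts to: take K local optimizer steps, then average parameters across workers. A hedged sketch of that synchronization rule; allreduce_mean is a hypothetical stand-in for the torch.distributed collective the real implementation uses:

import torch

def local_sgd_sync(model: torch.nn.Module, step: int, k: int, allreduce_mean) -> None:
    # synchronize only on every k-th step, trading gradient traffic for staleness
    if step % k == 0:
        with torch.no_grad():
            for p in model.parameters():
                p.data.copy_(allreduce_mean(p.data))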
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 1 |
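What _convert_nargs_to_dict is tested for above: bare --flag tokens become booleans and values are coerced to int/float/bool/str. An illustrative re-implementation, not the accelerate source:

def to_dict(args):
    out, key = {}, None
    for token in args:
        if token.startswith("--"):
            key = token[2:]
            out[key] = True  # bare flag defaults to True
        elif token in ("True", "False"):
            out[key] = token == "True"
        else:
            value = token
            for cast in (int, float):
                try:
                    value = cast(token)
                    break
                except ValueError:
                    continue
            out[key] = value
    return out

assert to_dict(["--epochs", "3", "--do_train", "--max_steps", "50.5"]) == {
    "epochs": 3, "do_train": True, "max_steps": 50.5
}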
'''simple docstring'''
from __future__ import annotations
def a ( n ) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
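Worked example: 360 factors as 2·2·2·3·3·5, and multiplying back recovers the input (self-contained copy of the loop above):

import math

def trial_division(n: int) -> list:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # whatever remains is prime
    return factors

assert trial_division(360) == [2, 2, 2, 3, 3, 5]
assert math.prod(trial_division(360)) == 360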
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( module ) -> int:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def a ( ) -> Union[str, Any]:
    '''simple docstring'''
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def a ( __a ) -> Any:
    '''simple docstring'''
    fig = plt.imshow(__a )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def a ( ) -> str:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
return timestamp | 97 | 1 |
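Usage check for the freezing helper: after the loop no parameter requires grad, so the module is excluded from backpropagation:

import torch

layer = torch.nn.Linear(4, 2)
for param in layer.parameters():
    param.requires_grad = False
assert not any(p.requires_grad for p in layer.parameters())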
'''simple docstring'''
from torch import nn
def a ( act_fn ) -> List[str]:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' ) | 97 |
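Quick check of the "swish"/"silu" branch: SiLU is x * sigmoid(x):

import torch
from torch import nn

act = nn.SiLU()
x = torch.tensor([0.0, 1.0])
assert torch.allclose(act(x), x * torch.sigmoid(x))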
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )} | 97 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 97 | 1 |
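Shape of the special-token wrapping above, with illustrative ids (bos=0, eos=2):

BOS, EOS = 0, 2

def with_special_tokens(ids_a, ids_b=None):
    out = [BOS] + ids_a + [EOS]
    # a pair gets a second eos separator before the appended sequence
    return out if ids_b is None else out + [EOS] + ids_b + [EOS]

assert with_special_tokens([5, 6]) == [0, 5, 6, 2]
assert with_special_tokens([5], [7]) == [0, 5, 2, 2, 7, 2]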
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve ( ) -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution ( limit = 1e10 ) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution()) | 97 |
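The 2 * prime * n test rests on the identity that, for odd n and prime p, ((p - 1)**n + (p + 1)**n) mod p**2 equals 2pn mod p**2. A spot check:

for p, n in [(5, 3), (7, 5), (11, 7)]:
    assert ((p - 1) ** n + (p + 1) ** n) % p**2 == (2 * p * n) % p**2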
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 | 1 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__snake_case = True
except ImportError:
__snake_case = False
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowercase ( A__ ):
"""simple docstring"""
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=UpperCamelCase_ , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=UpperCamelCase_ , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , *UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = testing
UpperCamelCase__ :Optional[int] = testing_file
UpperCamelCase__ :Dict = path
def lowerCAmelCase__ ( self ):
'''simple docstring'''
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase__ :Optional[int] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(UpperCamelCase_ ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCamelCase__ :Optional[int] = (
Path(UpperCamelCase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase__ :int = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase_ ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCamelCase__ :Union[str, Any] = json.load(UpperCamelCase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCamelCase_ , extra_context=UpperCamelCase_ , )
UpperCamelCase__ :List[Any] = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCamelCase__ :Any = json.load(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = configuration['''lowercase_modelname''']
UpperCamelCase__ :Union[str, Any] = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F'''{directory}/configuration.json''' )
UpperCamelCase__ :str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase__ :str = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase__ :List[str] = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase__ :List[Any] = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=UpperCamelCase_ )
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , '''w''' ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(UpperCamelCase_ ):
with open(UpperCamelCase_ , '''r''' ) as f:
UpperCamelCase__ :List[str] = f.readlines()
with open(UpperCamelCase_ , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
# Create temp file
UpperCamelCase__ , UpperCamelCase__ :List[str] = mkstemp()
UpperCamelCase__ :Optional[int] = False
with fdopen(UpperCamelCase_ , '''w''' ) as new_file:
with open(UpperCamelCase_ ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase_ )
if line_to_copy_below in line:
UpperCamelCase__ :int = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase_ )
if not line_found:
raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase_ , UpperCamelCase_ )
# Remove original file
remove(UpperCamelCase_ )
# Move new file
move(UpperCamelCase_ , UpperCamelCase_ )
def skip_units(UpperCamelCase_ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase_ ):
with open(UpperCamelCase_ ) as datafile:
UpperCamelCase__ :Union[str, Any] = []
UpperCamelCase__ :int = False
UpperCamelCase__ :Dict = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase__ :List[str] = line.split('''"''' )[1]
UpperCamelCase__ :int = skip_units(UpperCamelCase_ )
elif "# Below: " in line and "##" not in line:
UpperCamelCase__ :str = line.split('''"''' )[1]
UpperCamelCase__ :Tuple = skip_units(UpperCamelCase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = []
elif "# Replace with" in line and "##" not in line:
UpperCamelCase__ :str = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase_ )
remove(UpperCamelCase_ )
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(UpperCamelCase_ ) | 97 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset | 97 | 1 |
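This input stream backs datasets.Dataset.from_generator; a typical call looks like the following (requires the datasets library):

from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}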
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
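The renaming in the row above leaves the `_import_structure` handed to `_LazyModule` undefined; judging from the `TYPE_CHECKING` imports, the intended dict plausibly looks like this (a reconstruction, not verbatim transformers source):

```python
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
# the guarded entries are added only when the optional dependency imports cleanly
_import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
_import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
_import_structure["modeling_dpt"] = [
    "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
    "DPTForDepthEstimation",
    "DPTForSemanticSegmentation",
    "DPTModel",
    "DPTPreTrainedModel",
]
```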
'''simple docstring'''
__snake_case = 65521
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
return (b << 16) | a | 97 | 1 |
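The checksum row above is Adler-32 with its variable names scrambled; a deobfuscated sketch (the helper name and the test string are mine):

```python
MOD_ADLER = 65521  # largest prime below 2**16, per the Adler-32 definition

def adler32(plain_text: str) -> int:
    a, b = 1, 0
    for ch in plain_text:
        a = (a + ord(ch)) % MOD_ADLER  # running sum of character values, seeded with 1
        b = (b + a) % MOD_ADLER        # running sum of the intermediate a values
    return (b << 16) | a               # b in the high 16 bits, a in the low 16

print(adler32("Wikipedia"))  # 300286872, i.e. 0x11E60398
```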
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def a ( __a ) -> typing.Counter[int]:
'''simple docstring'''
UpperCamelCase__ :typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__a , max_perimeter + 1 ):
UpperCamelCase__ :int = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__a ):
UpperCamelCase__ :Optional[Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def a ( __a = 1000 ) -> int:
'''simple docstring'''
UpperCamelCase__ :int = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""") | 97 |
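The row above appears to be Project Euler 39 (which perimeter p <= 1000 admits the most integer right triangles); a cleaned-up sketch of the same brute force:

```python
from collections import Counter

def count_triangle_perimeters(max_perimeter: int) -> Counter:
    triplets: Counter = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base**2 + perpendicular**2) ** 0.5
            if hypotenuse != int(hypotenuse):
                continue  # not a Pythagorean pair
            perimeter = base + perpendicular + int(hypotenuse)
            if perimeter <= max_perimeter:
                triplets[perimeter] += 1
    return triplets

print(count_triangle_perimeters(1000).most_common(1)[0][0])  # 840
```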
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 1 |
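A quick usage sketch for the config class above (a small, randomly initialised model; the sizes are illustrative, not from the snippet):

```python
from transformers import CamembertConfig, CamembertModel

config = CamembertConfig(num_hidden_layers=6, hidden_size=384, num_attention_heads=6)
model = CamembertModel(config)  # fresh weights with a camembert-base-compatible layout
print(config.position_embedding_type)  # "absolute" by default, as in the signature above
```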
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowercase ( A__ ):
"""simple docstring"""
_a = 'microsoft/speecht5_tts'
_a = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
_a = 'text_reader'
_a = SpeechTaProcessor
_a = SpeechTaForTextToSpeech
_a = SpeechTaHifiGan
_a = ['text']
_a = ['audio']
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.post_processor is None:
UpperCamelCase__ :int = '''microsoft/speecht5_hifigan'''
super().setup()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :str = self.pre_processor(text=UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
UpperCamelCase__ :Union[str, Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
UpperCamelCase__ :int = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(UpperCamelCase_ ).cpu().detach() | 97 |
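Outside the tool wrapper, the same models can be driven directly; a sketch (the model ids and the x-vector index 7305 are taken from the snippet above):

```python
import torch
from datasets import load_dataset
from transformers import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor

processor = SpeechTaProcessor.from_pretrained("microsoft/speecht5_tts")
model = SpeechTaForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechTaHifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, this is a test.", return_tensors="pt")
xvectors = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker = torch.tensor(xvectors[7305]["xvector"]).unsqueeze(0)

with torch.no_grad():
    waveform = model.generate_speech(inputs["input_ids"], speaker, vocoder=vocoder)
```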
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
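A minimal inference-side sketch of the processor those tests exercise (the local image path is hypothetical):

```python
from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.open("cats.png")  # hypothetical local file
encoding = processor(images=image, return_tensors="pt")
# a 640x480 input is resized so the short edge is 800 (long edge capped at 1333),
# matching the [1, 3, 800, 1066] shape asserted in the slow tests above
print(encoding["pixel_values"].shape)
```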
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
__snake_case = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
if isinstance(__a , torch.Tensor ):
return image
elif isinstance(__a , PIL.Image.Image ):
UpperCamelCase__ :Any = [image]
UpperCamelCase__ :Any = [trans(img.convert('''RGB''' ) ) for img in image]
UpperCamelCase__ :List[str] = torch.stack(__a )
return image
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase__ :str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = min(int(num_inference_steps * strength ) , UpperCamelCase_ )
UpperCamelCase__ :List[str] = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase__ :Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase_ )}''' )
UpperCamelCase__ :List[str] = image.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase__ :List[Any] = init_latents.shape
UpperCamelCase__ :int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
# get latents
print('''add noise to latents at timestep''' , UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Any = init_latents
return latents
@torch.no_grad()
def __call__( self , UpperCamelCase_ = None , UpperCamelCase_ = 0.8 , UpperCamelCase_ = 1 , UpperCamelCase_ = None , UpperCamelCase_ = 0.0 , UpperCamelCase_ = 50 , UpperCamelCase_ = None , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , ):
'''simple docstring'''
self.check_inputs(UpperCamelCase_ )
# 2. Preprocess image
UpperCamelCase__ :Tuple = preprocess(UpperCamelCase_ )
# 3. set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.get_timesteps(UpperCamelCase_ , UpperCamelCase_ , self.device )
UpperCamelCase__ :Tuple = timesteps[:1].repeat(UpperCamelCase_ )
# 4. Prepare latent variables
UpperCamelCase__ :int = self.prepare_latents(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.unet.dtype , self.device , UpperCamelCase_ )
UpperCamelCase__ :int = latents
# 5. Denoising loop
for t in self.progress_bar(UpperCamelCase_ ):
# 1. predict noise model_output
UpperCamelCase__ :Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase__ :Dict = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ , ).prev_sample
UpperCamelCase__ :Dict = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ :Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ :Dict = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=UpperCamelCase_ ) | 97 |
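The strength-to-timestep mapping in the pipeline above is worth isolating; a deobfuscated sketch of that step:

```python
def get_timesteps(scheduler, num_inference_steps: int, strength: float):
    # strength=0.8 with 50 steps noises the input image up to timestep index 10
    # and denoises over the remaining 40 steps; strength=1.0 ignores the input image.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return scheduler.timesteps[t_start:], num_inference_steps - t_start
```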
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't include this task in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 1 |
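Under the scrambled names, the class above is a standard bitmask DP over persons; a compact sketch of the recurrence (the names are mine):

```python
from functools import lru_cache

def count_assignments(task_performed: list[list[int]], total_tasks: int) -> int:
    persons = len(task_performed)
    final_mask = (1 << persons) - 1
    can_do: dict[int, list[int]] = {}  # task -> persons able to perform it
    for person, tasks in enumerate(task_performed):
        for t in tasks:
            can_do.setdefault(t, []).append(person)

    @lru_cache(maxsize=None)
    def ways(mask: int, task_no: int) -> int:
        if mask == final_mask:        # every person is busy: one valid assignment
            return 1
        if task_no > total_tasks:     # tasks exhausted while someone is still idle
            return 0
        total = ways(mask, task_no + 1)  # leave this task unassigned
        for p in can_do.get(task_no, []):
            if not mask & (1 << p):      # p is still free
                total += ways(mask | (1 << p), task_no + 1)
        return total

    return ways(0, 1)

print(count_assignments([[1, 3, 4], [1, 2, 5], [3, 4]], 5))  # 10
```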
'''simple docstring'''
from math import isclose, sqrt
def a ( __a , __a , __a ) -> tuple[float, float, float]:
'''simple docstring'''
UpperCamelCase__ :List[str] = point_y / 4 / point_x
UpperCamelCase__ :str = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
UpperCamelCase__ :Dict = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
UpperCamelCase__ :Tuple = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
UpperCamelCase__ :Optional[int] = outgoing_gradient**2 + 4
UpperCamelCase__ :List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
UpperCamelCase__ :Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
UpperCamelCase__ :List[Any] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
UpperCamelCase__ :Any = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
UpperCamelCase__ :int = x_minus if isclose(__a , __a ) else x_plus
UpperCamelCase__ :List[Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a ( __a = 1.4 , __a = -9.6 ) -> int:
'''simple docstring'''
UpperCamelCase__ :int = 0
UpperCamelCase__ :float = first_x_coord
UpperCamelCase__ :float = first_y_coord
UpperCamelCase__ :float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = next_point(__a , __a , __a )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 |
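The core of the row above is reflecting a slope about the ellipse normal; a deobfuscated sketch of that single step (for the ellipse 4x^2 + y^2 = 100, as in the snippet):

```python
def reflect_gradient(incoming_gradient: float, x: float, y: float) -> float:
    n = y / (4 * x)                # gradient of the normal to the ellipse at (x, y)
    s = 2 * n / (1 + n * n)        # sin of twice the normal's angle
    c = (1 - n * n) / (1 + n * n)  # cos of twice the normal's angle
    # rotate the incoming direction by twice the normal's angle
    return (s - c * incoming_gradient) / (c + s * incoming_gradient)
```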
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
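The manual max_id loop above can also be written with tweepy's Cursor pagination helper; a sketch against the same v1.1 endpoint (Twitter caps user_timeline history at roughly 3,200 tweets either way):

```python
import csv
import tweepy

def get_all_tweets_cursor(api: tweepy.API, screen_name: str) -> None:
    # Cursor does the max_id bookkeeping that the snippet handles by hand
    rows = [
        [t.id_str, t.created_at, t.text]
        for t in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]
    with open(f"new_{screen_name}_tweets.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(rows)
```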